From 893dbc0259bbe03f68d7d5b53cde4482250f0f96 Mon Sep 17 00:00:00 2001
From: Jens Wiklander <jens.wiklander@linaro.org>
Date: Fri, 13 Sep 2024 11:48:38 +0200
Subject: [PATCH] core: merge core_mmu_init_phys_mem() and
 core_mmu_init_virtualization()

Moves the implementation of core_mmu_init_virtualization() into
core_mmu_init_phys_mem(). This simplifies init_primary() in
core/arch/arm/kernel/boot.c.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
 core/arch/arm/kernel/boot.c |  7 +------
 core/include/mm/core_mmu.h  |  2 --
 core/mm/core_mmu.c          | 34 +++++++++++++++++-----------------
 3 files changed, 18 insertions(+), 25 deletions(-)

diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c
index 2f9185da039..8046fd19945 100644
--- a/core/arch/arm/kernel/boot.c
+++ b/core/arch/arm/kernel/boot.c
@@ -927,12 +927,7 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
 #endif
 	core_mmu_save_mem_map();
 
-	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
-		IMSG("Initializing virtualization support");
-		core_mmu_init_virtualization();
-	} else {
-		core_mmu_init_phys_mem();
-	}
+	core_mmu_init_phys_mem();
 	boot_mem_release_unused();
 
 	IMSG_RAW("\n");
diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h
index cdbc0fedefb..5db42eedf44 100644
--- a/core/include/mm/core_mmu.h
+++ b/core/include/mm/core_mmu.h
@@ -640,8 +640,6 @@ void core_mmu_set_default_prtn(void);
 void core_mmu_set_default_prtn_tbl(void);
 #endif
 
-void core_mmu_init_virtualization(void);
-
 /* Initialize physical memory pool */
 void core_mmu_init_phys_mem(void);
 
diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c
index 7a8d22015b4..b8b330c1922 100644
--- a/core/mm/core_mmu.c
+++ b/core/mm/core_mmu.c
@@ -2588,20 +2588,6 @@ bool is_nexus(const void *va)
 }
 #endif
 
-void core_mmu_init_virtualization(void)
-{
-	paddr_t b1 = 0;
-	paddr_size_t s1 = 0;
-
-	static_assert(ARRAY_SIZE(secure_only) <= 2);
-	if (ARRAY_SIZE(secure_only) == 2) {
-		b1 = secure_only[1].paddr;
-		s1 = secure_only[1].size;
-	}
-	virt_init_memory(&static_memory_map, secure_only[0].paddr,
-			 secure_only[0].size, b1, s1);
-}
-
 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
 {
 	assert(p->pa);
@@ -2682,10 +2668,22 @@ static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa)
 
 void core_mmu_init_phys_mem(void)
 {
-	paddr_t ps = 0;
-	size_t size = 0;
+	/*
+	 * Get virtual addr/size of RAM where TA are loaded/executed;
+	 * NSec shared mem allocated from teecore.
+	 */
+	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
+		paddr_t b1 = 0;
+		paddr_size_t s1 = 0;
 
-	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
+		static_assert(ARRAY_SIZE(secure_only) <= 2);
+		if (ARRAY_SIZE(secure_only) == 2) {
+			b1 = secure_only[1].paddr;
+			s1 = secure_only[1].size;
+		}
+		virt_init_memory(&static_memory_map, secure_only[0].paddr,
+				 secure_only[0].size, b1, s1);
+	} else {
 #ifdef CFG_WITH_PAGER
 	/*
 	 * The pager uses all core memory so there's no need to add
@@ -2696,6 +2694,8 @@ void core_mmu_init_phys_mem(void)
 #else /*!CFG_WITH_PAGER*/
 	size_t align = BIT(CORE_MMU_USER_CODE_SHIFT);
 	paddr_t end_pa = 0;
+	size_t size = 0;
+	paddr_t ps = 0;
 	paddr_t pa = 0;
 
 	static_assert(ARRAY_SIZE(secure_only) <= 2);