From bfb714a2f8411f071aac45ecfcfb52c8c2859c2b Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Wed, 10 Jul 2024 18:07:10 +0200 Subject: [PATCH] core: mm: ensure all pager VA space is mapped with small pages Fix can_map_at_level() to ensure all memory areas related to the pager pageable virtual memory are mapped with small pages. This change fixes an issue found when the pager physical RAM ends on a section boundary (e.g. 512MB, or 2MB in the LPAE case) causing the virtual memory mapping above that boundary to be prepared with pgdir or wider MMU tables while the pager implementation expects 4kB page MMU tables. Signed-off-by: Etienne Carriere Acked-by: Jens Wiklander --- core/mm/core_mmu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index 5e1acf0d61e..4926f74bd2e 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -1805,10 +1805,11 @@ static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, #ifdef CFG_WITH_PAGER /* - * If pager is enabled, we need to map tee ram + * If pager is enabled, we need to map TEE RAM and the whole pager * regions with small pages only */ - if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE) + if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) && + block_size != SMALL_PAGE_SIZE) return false; #endif