diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c
index dc86bb1e13c..b29f35ab86a 100644
--- a/core/arch/arm/kernel/boot.c
+++ b/core/arch/arm/kernel/boot.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -25,6 +26,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -82,6 +84,8 @@ static void *manifest_dt __nex_bss;
 static unsigned long boot_arg_fdt __nex_bss;
 static unsigned long boot_arg_nsec_entry __nex_bss;
 static unsigned long boot_arg_pageable_part __nex_bss;
+static unsigned long boot_arg_transfer_list __nex_bss;
+static struct transfer_list_header *mapped_tl __nex_bss;

 #ifdef CFG_SECONDARY_INIT_CNTFRQ
 static uint32_t cntfrq;
@@ -1252,15 +1256,53 @@ void __weak boot_init_primary_early(void)
 {
 	unsigned long pageable_part = 0;
 	unsigned long e = PADDR_INVALID;
+	struct transfer_list_entry *te = NULL;

 	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
 		e = boot_arg_nsec_entry;
-	if (IS_ENABLED(CFG_WITH_PAGER))
-		pageable_part = boot_arg_pageable_part;
+
+	if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
+		/* map and save the TL */
+		mapped_tl = transfer_list_map(boot_arg_transfer_list);
+		if (!mapped_tl)
+			panic("Failed to map transfer list");
+		te = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
+	}
+
+	if (IS_ENABLED(CFG_WITH_PAGER)) {
+		if (IS_ENABLED(CFG_TRANSFER_LIST) && te)
+			pageable_part = get_le64(transfer_list_entry_data(te));
+		else
+			pageable_part = boot_arg_pageable_part;
+	}

 	init_primary(pageable_part, e);
 }

+static void boot_save_transfer_list(unsigned long zero_reg,
+				    unsigned long transfer_list,
+				    unsigned long fdt)
+{
+	struct transfer_list_header *tl = (void *)transfer_list;
+	struct transfer_list_entry *te = NULL;
+
+	if (zero_reg != 0)
+		panic("Incorrect transfer list register convention");
+
+	if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
+	    !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
+		panic("Transfer list base address is not aligned");
+
+	if (transfer_list_check_header(tl) == TL_OPS_NONE)
+		panic("Invalid transfer list");
+
+	te = transfer_list_find(tl, TL_TAG_FDT);
+	if (fdt != (unsigned long)transfer_list_entry_data(te))
+		panic("DT does not match to the DT entry of the TL");
+
+	boot_arg_transfer_list = transfer_list;
+}
+
 #if defined(CFG_WITH_ARM_TRUSTED_FW)
 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
 				  unsigned long a1 __unused)
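The new code in boot_init_primary_early() only needs two operations from the handoff structure: find an entry by tag and read its payload, here a little-endian 64-bit physical address for the pageable part. As a reading aid, below is a simplified, self-contained model of that find-by-tag walk; the structure layout, field names and the 8-byte entry rounding are assumptions made for this sketch and do not mirror the real struct transfer_list_header and struct transfer_list_entry definitions used by the patch.

/*
 * Illustration only: a simplified model of a tagged handoff list and
 * the find-by-tag walk that transfer_list_find() provides. Field
 * names, widths and the 8-byte entry rounding are assumptions of this
 * sketch, not the actual library layout.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_tl_header {
	uint32_t signature;	/* marks a transfer list in memory */
	uint32_t used_size;	/* bytes of header plus entries in use */
};

struct demo_tl_entry {
	uint32_t tag_id;	/* what the payload holds, e.g. an FDT */
	uint32_t data_size;	/* payload bytes following this header */
};

/* Walk the entries sequentially until the requested tag is found */
static struct demo_tl_entry *demo_tl_find(struct demo_tl_header *tl,
					  uint32_t tag_id)
{
	uint8_t *cur = (uint8_t *)tl + sizeof(*tl);
	uint8_t *end = (uint8_t *)tl + tl->used_size;

	while (cur + sizeof(struct demo_tl_entry) <= end) {
		struct demo_tl_entry *te = (struct demo_tl_entry *)cur;

		if (te->tag_id == tag_id)
			return te;
		/* assume entries are padded to an 8-byte boundary */
		cur += (sizeof(*te) + te->data_size + 7) & ~(size_t)7;
	}

	return NULL;
}

/* In this model the payload immediately follows the entry header */
static void *demo_tl_entry_data(struct demo_tl_entry *te)
{
	return (uint8_t *)te + sizeof(*te);
}

With such a walk in place, the pager path in the hunk above reduces to a lookup of the pageable-part tag followed by get_le64() on the returned payload pointer, which is exactly the shape of the transfer_list_find() and transfer_list_entry_data() calls shown.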
@@ -1441,13 +1483,14 @@ static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
 	*size = num;
 }

-void __weak boot_save_args(unsigned long a0, unsigned long a1 __unused,
-			   unsigned long a2 __maybe_unused,
-			   unsigned long a3 __unused,
+void __weak boot_save_args(unsigned long a0, unsigned long a1,
+			   unsigned long a2, unsigned long a3,
 			   unsigned long a4 __maybe_unused)
 {
 	/*
 	 * Register use:
+	 *
+	 * Scenario A: Default arguments
 	 * a0 - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
 	 *	if non-NULL holds the TOS FW config [1] address
 	 *    - CFG_CORE_FFA=y &&
@@ -1471,8 +1514,31 @@ void __weak boot_save_args(unsigned long a0, unsigned long a1 __unused,
 	 * here. This is also called Manifest DT, related to the Manifest DT
 	 * passed in the FF-A Boot Information Blob, but with a different
 	 * compatible string.
+	 *
+	 * Scenario B: FW Handoff via Transfer List
+	 * Note: FF-A and non-secure entry are not yet supported with
+	 * Transfer List
+	 * a0 - DTB address or 0 (AArch64)
+	 *    - must be 0 (AArch32)
+	 * a1 - TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK
+	 * a2 - must be 0 (AArch64)
+	 *    - DTB address or 0 (AArch32)
+	 * a3 - Transfer list base address
+	 * a4 - Not used
 	 */

+	if (IS_ENABLED(CFG_TRANSFER_LIST) &&
+	    a1 == (TRANSFER_LIST_SIGNATURE | REG_CONVENTION_VER_MASK)) {
+		if (IS_ENABLED(CFG_ARM64_core)) {
+			boot_save_transfer_list(a2, a3, a0);
+			boot_arg_fdt = a0;
+		} else {
+			boot_save_transfer_list(a0, a3, a2);
+			boot_arg_fdt = a2;
+		}
+		return;
+	}
+
 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
 #if defined(CFG_DT_ADDR)
 		boot_arg_fdt = CFG_DT_ADDR;
@@ -1512,3 +1578,41 @@ void __weak boot_save_args(unsigned long a0, unsigned long a1 __unused,
 		}
 	}
 }
+
+#if defined(CFG_TRANSFER_LIST)
+static TEE_Result release_transfer_list(void)
+{
+	struct dt_descriptor *dt = get_external_dt_desc();
+
+	if (!mapped_tl)
+		return TEE_SUCCESS;
+
+	if (dt) {
+		int ret = 0;
+		struct transfer_list_entry *te = NULL;
+
+		/*
+		 * Pack the DTB and update the transfer list before un-mapping
+		 */
+		ret = fdt_pack(dt->blob);
+		if (ret < 0) {
+			EMSG("Failed to pack Device Tree at 0x%" PRIxPA
+			     ": error %d", virt_to_phys(dt->blob), ret);
+			panic();
+		}
+
+		te = transfer_list_find(mapped_tl, TL_TAG_FDT);
+		assert(dt->blob == transfer_list_entry_data(te));
+		transfer_list_set_data_size(mapped_tl, te,
+					    fdt_totalsize(dt->blob));
+		dt->blob = NULL;
+	}
+
+	transfer_list_unmap_sync(mapped_tl);
+	mapped_tl = NULL;
+
+	return TEE_SUCCESS;
+}
+
+boot_final(release_transfer_list);
+#endif
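The AArch64/AArch32 split in the new dispatch above is easy to misread because the DTB and the mandatory-zero register swap places between a0 and a2. Below is a small free-standing sketch of that mapping; the struct and function names are hypothetical, and only the convention they encode is taken from the Scenario B comment block.

/*
 * Sketch of the Scenario B argument selection done by boot_save_args().
 * The types and names are hypothetical; the convention they encode
 * (DTB in a0 on AArch64 and in a2 on AArch32, the other register must
 * be zero, transfer list base always in a3) is from the comment above.
 */
#include <stdbool.h>

struct handoff_regs {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};

struct handoff_view {
	unsigned long zero_reg;	/* register that must hold 0 */
	unsigned long dtb;	/* DTB address, or 0 if none */
	unsigned long tl_base;	/* transfer list base address */
};

static struct handoff_view decode_scenario_b(const struct handoff_regs *r,
					     bool aarch64)
{
	struct handoff_view v = {
		.zero_reg = aarch64 ? r->a2 : r->a0,
		.dtb = aarch64 ? r->a0 : r->a2,
		.tl_base = r->a3,
	};

	return v;
}

boot_save_args() performs exactly this selection before calling boot_save_transfer_list(zero_reg, tl_base, dtb), which in turn rejects a non-zero zero_reg, an unaligned or corrupt transfer list, and a DTB pointer that does not match the list's TL_TAG_FDT entry.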
diff --git a/core/kernel/dt.c b/core/kernel/dt.c
index 299aba11017..8388a717e0c 100644
--- a/core/kernel/dt.c
+++ b/core/kernel/dt.c
@@ -547,8 +547,8 @@ struct dt_descriptor *get_external_dt_desc(void)
 void init_external_dt(unsigned long phys_dt)
 {
 	struct dt_descriptor *dt = &external_dt;
-	void *fdt = NULL;
 	int ret = 0;
+	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;

 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
 		return;
@@ -566,11 +566,22 @@ void init_external_dt(unsigned long phys_dt)
 		return;
 	}

-	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
-	if (!fdt)
-		panic("Failed to map external DTB");
-
-	dt->blob = fdt;
+	mtype = core_mmu_get_type_by_pa(phys_dt);
+	if (mtype == MEM_AREA_MAXTYPE) {
+		/* Map the DTB if it is not yet mapped */
+		dt->blob = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt,
+						CFG_DTB_MAX_SIZE);
+		if (!dt->blob)
+			panic("Failed to map external DTB");
+	} else {
+		/* Get the DTB address if already mapped in a memory area */
+		dt->blob = phys_to_virt(phys_dt, mtype, CFG_DTB_MAX_SIZE);
+		if (!dt->blob) {
+			EMSG("Failed to get a mapped external DTB for PA %#"
+			     PRIxPA, phys_dt);
+			panic();
+		}
+	}

 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
 	if (ret < 0) {
@@ -579,7 +590,7 @@
-	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
+	ret = fdt_open_into(dt->blob, dt->blob, CFG_DTB_MAX_SIZE);
 	if (ret < 0) {
 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
 		panic();
 	}
@@ -600,6 +611,7 @@ void *get_external_dt(void)
 static TEE_Result release_external_dt(void)
 {
 	int ret = 0;
+	paddr_t pa_dt = 0;

 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
 		return TEE_SUCCESS;
@@ -607,6 +619,14 @@ static TEE_Result release_external_dt(void)
 	if (!external_dt.blob)
 		return TEE_SUCCESS;

+	pa_dt = virt_to_phys(external_dt.blob);
+	/*
+	 * Skip packing and un-mapping operations if the external DTB is mapped
+	 * in a different memory area
+	 */
+	if (core_mmu_get_type_by_pa(pa_dt) != MEM_AREA_EXT_DT)
+		return TEE_SUCCESS;
+
 	ret = fdt_pack(external_dt.blob);
 	if (ret < 0) {
 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
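Both release paths, release_transfer_list() in boot.c and release_external_dt() above, rely on the standard libfdt contract that fdt_open_into() grows a blob into a working buffer for editing and fdt_pack() later shrinks its reported size back to the bytes actually used, so only fdt_totalsize() bytes need to stay published. A minimal stand-alone illustration of that pack-then-trim step follows; the helper name and buffer handling are illustrative, not code from the patch.

/*
 * Illustration of the pack-then-trim step used by the release hooks:
 * grow the blob for editing, apply changes, pack it again and report
 * how many bytes remain worth publishing. demo_trim_dtb() and its
 * buffer handling are hypothetical; the libfdt calls are standard.
 */
#include <stddef.h>
#include <libfdt.h>

static int demo_trim_dtb(void *blob, size_t bufsize, size_t *out_size)
{
	int ret = fdt_open_into(blob, blob, bufsize); /* grow for edits */

	if (ret < 0)
		return ret;

	/* ... overlay merging and fixups would happen here ... */

	ret = fdt_pack(blob);	/* drop the free space again */
	if (ret < 0)
		return ret;

	*out_size = fdt_totalsize(blob); /* bytes worth keeping mapped */

	return 0;
}

This is also why release_external_dt() can return early when the blob lives outside MEM_AREA_EXT_DT: in that case the transfer-list final call owns the mapping and performs the same pack and trim before unmapping.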