Diffstat
-rw-r--r-- | lib/xlat_tables_v2/aarch32/enable_mmu.S | 120
-rw-r--r-- | lib/xlat_tables_v2/aarch32/xlat_tables_arch.c | 260
-rw-r--r-- | lib/xlat_tables_v2/aarch64/enable_mmu.S | 97
-rw-r--r-- | lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 324
-rw-r--r-- | lib/xlat_tables_v2/ro_xlat_tables.mk | 41
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables.mk | 19
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_context.c | 270
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_core.c | 1244
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_private.h | 110
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_utils.c | 592
10 files changed, 3077 insertions, 0 deletions
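Before the diff body, a brief orientation: xlat_tables_context.c (added below) exposes the public entry points that a BL image normally drives. The sketch that follows shows that flow under stated assumptions: the EXAMPLE_* addresses, sizes and the plat_setup_xlat() wrapper are illustrative placeholders that are not part of this patch, while mmap_add_region(), init_xlat_tables(), enable_mmu_el3()/enable_mmu_svc_mon() and the MT_* attributes are the ones defined in these files (here assuming an image running at EL3 on AArch64 or in the Secure PL1 world on AArch32).

#include <lib/xlat_tables/xlat_tables_v2.h>

/* Placeholder addresses and sizes; a real platform takes these from platform_def.h. */
#define EXAMPLE_IMAGE_BASE	0x04000000ULL
#define EXAMPLE_IMAGE_SIZE	0x00100000U
#define EXAMPLE_DEVICE_BASE	0x1A000000ULL
#define EXAMPLE_DEVICE_SIZE	0x00200000U

/* Hypothetical wrapper, not part of this patch. */
static void plat_setup_xlat(void)
{
	/* Identity-map the image as secure, read-write normal memory. */
	mmap_add_region(EXAMPLE_IMAGE_BASE, EXAMPLE_IMAGE_BASE,
			EXAMPLE_IMAGE_SIZE, MT_MEMORY | MT_RW | MT_SECURE);

	/* Map a peripheral window as secure device memory. */
	mmap_add_region(EXAMPLE_DEVICE_BASE, EXAMPLE_DEVICE_BASE,
			EXAMPLE_DEVICE_SIZE, MT_DEVICE | MT_RW | MT_SECURE);

	/* Build the tables for the current translation regime... */
	init_xlat_tables();

	/* ...then enable the MMU (flags may also include DISABLE_DCACHE). */
#ifdef __aarch64__
	enable_mmu_el3(0U);
#else
	enable_mmu_svc_mon(0U);
#endif
}

When PLAT_XLAT_TABLES_DYNAMIC is enabled, mmap_add_dynamic_region() and mmap_remove_dynamic_region() (also added below) remain usable after initialisation; platforms can likewise register extra contexts with REGISTER_XLAT_CONTEXT() and drive them through the *_ctx variants of the same calls.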
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S new file mode 100644 index 0000000..f2fff36 --- /dev/null +++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> +#include <assert_macros.S> +#include <lib/xlat_tables/xlat_tables_v2.h> + + .global enable_mmu_direct_svc_mon + .global enable_mmu_direct_hyp + + /* void enable_mmu_direct_svc_mon(unsigned int flags) */ +func enable_mmu_direct_svc_mon + /* Assert that MMU is turned off */ +#if ENABLE_ASSERTIONS + ldcopr r1, SCTLR + tst r1, #SCTLR_M_BIT + ASM_ASSERT(eq) +#endif + + /* Invalidate TLB entries */ + TLB_INVALIDATE(r0, TLBIALL) + + mov r3, r0 + ldr r0, =mmu_cfg_params + + /* MAIR0. Only the lower 32 bits are used. */ + ldr r1, [r0, #(MMU_CFG_MAIR << 3)] + stcopr r1, MAIR0 + + /* TTBCR. Only the lower 32 bits are used. */ + ldr r2, [r0, #(MMU_CFG_TCR << 3)] + stcopr r2, TTBCR + + /* TTBR0 */ + ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)] + ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)] + stcopr16 r1, r2, TTBR0_64 + + /* TTBR1 is unused right now; set it to 0. */ + mov r1, #0 + mov r2, #0 + stcopr16 r1, r2, TTBR1_64 + + /* + * Ensure all translation table writes have drained into memory, the TLB + * invalidation is complete, and translation register writes are + * committed before enabling the MMU + */ + dsb ish + isb + + /* Enable enable MMU by honoring flags */ + ldcopr r1, SCTLR + ldr r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT) + orr r1, r1, r2 + + /* Clear C bit if requested */ + tst r3, #DISABLE_DCACHE + bicne r1, r1, #SCTLR_C_BIT + + stcopr r1, SCTLR + isb + + bx lr +endfunc enable_mmu_direct_svc_mon + + + /* void enable_mmu_direct_hyp(unsigned int flags) */ +func enable_mmu_direct_hyp + /* Assert that MMU is turned off */ +#if ENABLE_ASSERTIONS + ldcopr r1, HSCTLR + tst r1, #HSCTLR_M_BIT + ASM_ASSERT(eq) +#endif + + /* Invalidate TLB entries */ + TLB_INVALIDATE(r0, TLBIALL) + + mov r3, r0 + ldr r0, =mmu_cfg_params + + /* HMAIR0 */ + ldr r1, [r0, #(MMU_CFG_MAIR << 3)] + stcopr r1, HMAIR0 + + /* HTCR */ + ldr r2, [r0, #(MMU_CFG_TCR << 3)] + stcopr r2, HTCR + + /* HTTBR */ + ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)] + ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)] + stcopr16 r1, r2, HTTBR_64 + + /* + * Ensure all translation table writes have drained into memory, the TLB + * invalidation is complete, and translation register writes are + * committed before enabling the MMU + */ + dsb ish + isb + + /* Enable enable MMU by honoring flags */ + ldcopr r1, HSCTLR + ldr r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT) + orr r1, r1, r2 + + /* Clear C bit if requested */ + tst r3, #DISABLE_DCACHE + bicne r1, r1, #HSCTLR_C_BIT + + stcopr r1, HSCTLR + isb + + bx lr +endfunc enable_mmu_direct_hyp diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c new file mode 100644 index 0000000..a1a44af --- /dev/null +++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> + +#include <platform_def.h> + +#include <arch.h> +#include <arch_features.h> +#include <arch_helpers.h> +#include <lib/cassert.h> +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "../xlat_tables_private.h" + +#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING) +#error ARMv7 target does not support LPAE MMU descriptors +#endif + +/* + * Returns true if the provided granule size is supported, false otherwise. + */ +bool xlat_arch_is_granule_size_supported(size_t size) +{ + /* + * The library uses the long descriptor translation table format, which + * supports 4 KiB pages only. + */ + return size == PAGE_SIZE_4KB; +} + +size_t xlat_arch_get_max_supported_granule_size(void) +{ + return PAGE_SIZE_4KB; +} + +/* + * Determine the physical address space encoded in the 'attr' parameter. + * + * The physical address will fall into one of two spaces; secure or + * nonsecure. + */ +uint32_t xlat_arch_get_pas(uint32_t attr) +{ + uint32_t pas = MT_PAS(attr); + + if (pas == MT_NS) { + return LOWER_ATTRS(NS); + } else { /* MT_SECURE */ + return 0U; + } +} + +#if ENABLE_ASSERTIONS +unsigned long long xlat_arch_get_max_supported_pa(void) +{ + /* Physical address space size for long descriptor format. */ + return (1ULL << 40) - 1ULL; +} + +/* + * Return minimum virtual address space size supported by the architecture + */ +uintptr_t xlat_get_min_virt_addr_space_size(void) +{ + return MIN_VIRT_ADDR_SPACE_SIZE; +} +#endif /* ENABLE_ASSERTIONS*/ + +bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx) +{ + if (ctx->xlat_regime == EL1_EL0_REGIME) { + assert(xlat_arch_current_el() == 1U); + return (read_sctlr() & SCTLR_M_BIT) != 0U; + } else { + assert(ctx->xlat_regime == EL2_REGIME); + assert(xlat_arch_current_el() == 2U); + return (read_hsctlr() & HSCTLR_M_BIT) != 0U; + } +} + +bool is_dcache_enabled(void) +{ + if (IS_IN_EL2()) { + return (read_hsctlr() & HSCTLR_C_BIT) != 0U; + } else { + return (read_sctlr() & SCTLR_C_BIT) != 0U; + } +} + +uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime) +{ + if (xlat_regime == EL1_EL0_REGIME) { + return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN); + } else { + assert(xlat_regime == EL2_REGIME); + return UPPER_ATTRS(XN); + } +} + +void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime) +{ + /* + * Ensure the translation table write has drained into memory before + * invalidating the TLB entry. + */ + dsbishst(); + + if (xlat_regime == EL1_EL0_REGIME) { + tlbimvaais(TLBI_ADDR(va)); + } else { + assert(xlat_regime == EL2_REGIME); + tlbimvahis(TLBI_ADDR(va)); + } +} + +void xlat_arch_tlbi_va_sync(void) +{ + /* Invalidate all entries from branch predictors. */ + bpiallis(); + + /* + * A TLB maintenance instruction can complete at any time after + * it is issued, but is only guaranteed to be complete after the + * execution of DSB by the PE that executed the TLB maintenance + * instruction. After the TLB invalidate instruction is + * complete, no new memory accesses using the invalidated TLB + * entries will be observed by any observer of the system + * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph + * "Ordering and completion of TLB maintenance instructions". + */ + dsbish(); + + /* + * The effects of a completed TLB maintenance instruction are + * only guaranteed to be visible on the PE that executed the + * instruction after the execution of an ISB instruction by the + * PE that executed the TLB maintenance instruction. 
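 * Taken together with xlat_arch_tlbi_va() above, the ordering this library
 * relies on when a live mapping is removed is, in sketch form (EL1&0 regime
 * case shown):
 *
 *   table[idx] = INVALID_DESC;     (update the descriptor, xlat_tables_core.c)
 *   dsbishst();                    (drain the table write, xlat_arch_tlbi_va())
 *   tlbimvaais(TLBI_ADDR(va));     (invalidate the stale TLB entry)
 *   bpiallis();                    (invalidate branch predictors, this function)
 *   dsbish();                      (wait for completion)
 *   isb();                         (synchronise this PE's context)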
+ */ + isb(); +} + +unsigned int xlat_arch_current_el(void) +{ + if (IS_IN_HYP()) { + return 2U; + } else { + assert(IS_IN_SVC() || IS_IN_MON()); + /* + * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, + * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3. + * + * The PL1&0 translation regime in AArch32 behaves like the + * EL1&0 regime in AArch64 except for the XN bits, but we set + * and unset them at the same time, so there's no difference in + * practice. + */ + return 1U; + } +} + +/******************************************************************************* + * Function for enabling the MMU in PL1 or PL2, assuming that the page tables + * have already been created. + ******************************************************************************/ +void setup_mmu_cfg(uint64_t *params, unsigned int flags, + const uint64_t *base_table, unsigned long long max_pa, + uintptr_t max_va, __unused int xlat_regime) +{ + uint64_t mair, ttbr0; + uint32_t ttbcr; + + /* Set attributes in the right indices of the MAIR */ + mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); + mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, + ATTR_IWBWA_OWBWA_NTR_INDEX); + mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE, + ATTR_NON_CACHEABLE_INDEX); + + /* + * Configure the control register for stage 1 of the PL1&0 or EL2 + * translation regimes. + */ + + /* Use the Long-descriptor translation table format. */ + ttbcr = TTBCR_EAE_BIT; + + if (xlat_regime == EL1_EL0_REGIME) { + assert(IS_IN_SVC() || IS_IN_MON()); + /* + * Disable translation table walk for addresses that are + * translated using TTBR1. Therefore, only TTBR0 is used. + */ + ttbcr |= TTBCR_EPD1_BIT; + } else { + assert(xlat_regime == EL2_REGIME); + assert(IS_IN_HYP()); + + /* + * Set HTCR bits as well. Set HTTBR table properties + * as Inner & outer WBWA & shareable. + */ + ttbcr |= HTCR_RES1 | + HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA | + HTCR_RGN0_INNER_WBA; + } + + /* + * Limit the input address ranges and memory region sizes translated + * using TTBR0 to the given virtual address space size, if smaller than + * 32 bits. + */ + if (max_va != UINT32_MAX) { + uintptr_t virtual_addr_space_size = max_va + 1U; + + assert(virtual_addr_space_size >= + xlat_get_min_virt_addr_space_size()); + assert(IS_POWER_OF_TWO(virtual_addr_space_size)); + + /* + * __builtin_ctzll(0) is undefined but here we are guaranteed + * that virtual_addr_space_size is in the range [1, UINT32_MAX]. + */ + int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size); + + ttbcr |= (uint32_t) t0sz; + } + + /* + * Set the cacheability and shareability attributes for memory + * associated with translation table walks using TTBR0. + */ + if ((flags & XLAT_TABLE_NC) != 0U) { + /* Inner & outer non-cacheable non-shareable. */ + ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC | + TTBCR_RGN0_INNER_NC; + } else { + /* Inner & outer WBWA & shareable. */ + ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA | + TTBCR_RGN0_INNER_WBA; + } + + /* Set TTBR0 bits as well */ + ttbr0 = (uint64_t)(uintptr_t) base_table; + + if (is_armv8_2_ttcnp_present()) { + /* Enable CnP bit so as to share page tables with all PEs. 
*/ + ttbr0 |= TTBR_CNP_BIT; + } + + /* Now populate MMU configuration */ + params[MMU_CFG_MAIR] = mair; + params[MMU_CFG_TCR] = (uint64_t) ttbcr; + params[MMU_CFG_TTBR0] = ttbr0; +} diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S new file mode 100644 index 0000000..9f075e4 --- /dev/null +++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> +#include <assert_macros.S> +#include <lib/xlat_tables/xlat_tables_v2.h> + + .global enable_mmu_direct_el1 + .global enable_mmu_direct_el2 + .global enable_mmu_direct_el3 + + /* Macros to read and write to system register for a given EL. */ + .macro _msr reg_name, el, gp_reg + msr \reg_name\()_el\()\el, \gp_reg + .endm + + .macro _mrs gp_reg, reg_name, el + mrs \gp_reg, \reg_name\()_el\()\el + .endm + + .macro tlbi_invalidate_all el + .if \el == 1 + TLB_INVALIDATE(vmalle1) + .elseif \el == 2 + TLB_INVALIDATE(alle2) + .elseif \el == 3 + TLB_INVALIDATE(alle3) + .else + .error "EL must be 1, 2 or 3" + .endif + .endm + + /* void enable_mmu_direct_el<x>(unsigned int flags) */ + .macro define_mmu_enable_func el + func enable_mmu_direct_\()el\el +#if ENABLE_ASSERTIONS + _mrs x1, sctlr, \el + tst x1, #SCTLR_M_BIT + ASM_ASSERT(eq) +#endif + /* Invalidate all TLB entries */ + tlbi_invalidate_all \el + + mov x7, x0 + adrp x0, mmu_cfg_params + add x0, x0, :lo12:mmu_cfg_params + + /* MAIR */ + ldr x1, [x0, #(MMU_CFG_MAIR << 3)] + _msr mair, \el, x1 + + /* TCR */ + ldr x2, [x0, #(MMU_CFG_TCR << 3)] + _msr tcr, \el, x2 + + /* TTBR */ + ldr x3, [x0, #(MMU_CFG_TTBR0 << 3)] + _msr ttbr0, \el, x3 + + /* + * Ensure all translation table writes have drained into memory, the TLB + * invalidation is complete, and translation register writes are + * committed before enabling the MMU + */ + dsb ish + isb + + /* Set and clear required fields of SCTLR */ + _mrs x4, sctlr, \el + mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT + orr x4, x4, x5 + + /* Additionally, amend SCTLR fields based on flags */ + bic x5, x4, #SCTLR_C_BIT + tst x7, #DISABLE_DCACHE + csel x4, x5, x4, ne + + _msr sctlr, \el, x4 + isb + + ret + endfunc enable_mmu_direct_\()el\el + .endm + + /* + * Define MMU-enabling functions for EL1, EL2 and EL3: + * + * enable_mmu_direct_el1 + * enable_mmu_direct_el2 + * enable_mmu_direct_el3 + */ + define_mmu_enable_func 1 + define_mmu_enable_func 2 + define_mmu_enable_func 3 diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c new file mode 100644 index 0000000..719110a --- /dev/null +++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <stdbool.h> +#include <stdint.h> + +#include <arch.h> +#include <arch_features.h> +#include <arch_helpers.h> +#include <lib/cassert.h> +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "../xlat_tables_private.h" + +/* + * Returns true if the provided granule size is supported, false otherwise. 
+ */ +bool xlat_arch_is_granule_size_supported(size_t size) +{ + u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1(); + + if (size == PAGE_SIZE_4KB) { + return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) & + ID_AA64MMFR0_EL1_TGRAN4_MASK) == + ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED; + } else if (size == PAGE_SIZE_16KB) { + return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) & + ID_AA64MMFR0_EL1_TGRAN16_MASK) == + ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED; + } else if (size == PAGE_SIZE_64KB) { + return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) & + ID_AA64MMFR0_EL1_TGRAN64_MASK) == + ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED; + } else { + return 0; + } +} + +size_t xlat_arch_get_max_supported_granule_size(void) +{ + if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) { + return PAGE_SIZE_64KB; + } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) { + return PAGE_SIZE_16KB; + } else { + assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB)); + return PAGE_SIZE_4KB; + } +} + +/* + * Determine the physical address space encoded in the 'attr' parameter. + * + * The physical address will fall into one of four spaces; secure, + * nonsecure, root, or realm if RME is enabled, or one of two spaces; + * secure and nonsecure otherwise. + */ +uint32_t xlat_arch_get_pas(uint32_t attr) +{ + uint32_t pas = MT_PAS(attr); + + switch (pas) { +#if ENABLE_RME + /* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */ + case MT_REALM: + return LOWER_ATTRS(EL3_S1_NSE | NS); + /* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */ + case MT_ROOT: + return LOWER_ATTRS(EL3_S1_NSE); +#endif + case MT_NS: + return LOWER_ATTRS(NS); + default: /* MT_SECURE */ + return 0U; + } +} + +unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr) +{ + /* Physical address can't exceed 48 bits */ + assert((max_addr & ADDR_MASK_48_TO_63) == 0U); + + /* 48 bits address */ + if ((max_addr & ADDR_MASK_44_TO_47) != 0U) + return TCR_PS_BITS_256TB; + + /* 44 bits address */ + if ((max_addr & ADDR_MASK_42_TO_43) != 0U) + return TCR_PS_BITS_16TB; + + /* 42 bits address */ + if ((max_addr & ADDR_MASK_40_TO_41) != 0U) + return TCR_PS_BITS_4TB; + + /* 40 bits address */ + if ((max_addr & ADDR_MASK_36_TO_39) != 0U) + return TCR_PS_BITS_1TB; + + /* 36 bits address */ + if ((max_addr & ADDR_MASK_32_TO_35) != 0U) + return TCR_PS_BITS_64GB; + + return TCR_PS_BITS_4GB; +} + +#if ENABLE_ASSERTIONS +/* + * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is + * supported in ARMv8.2 onwards. 
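 * For reference, the PARange encodings map to output address sizes as follows
 * (ID_AA64MMFR0_EL1.PARange, Arm ARM), matching the indexing of
 * pa_range_bits_arr[] below:
 *   0b0000: 32 bits (4 GB), 0b0001: 36 bits (64 GB), 0b0010: 40 bits (1 TB),
 *   0b0011: 42 bits (4 TB), 0b0100: 44 bits (16 TB), 0b0101: 48 bits (256 TB),
 *   0b0110: 52 bits (4 PB).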
+ */ +static const unsigned int pa_range_bits_arr[] = { + PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100, + PARANGE_0101, PARANGE_0110 +}; + +unsigned long long xlat_arch_get_max_supported_pa(void) +{ + u_register_t pa_range = read_id_aa64mmfr0_el1() & + ID_AA64MMFR0_EL1_PARANGE_MASK; + + /* All other values are reserved */ + assert(pa_range < ARRAY_SIZE(pa_range_bits_arr)); + + return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL; +} + +/* + * Return minimum virtual address space size supported by the architecture + */ +uintptr_t xlat_get_min_virt_addr_space_size(void) +{ + uintptr_t ret; + + if (is_armv8_4_ttst_present()) + ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST; + else + ret = MIN_VIRT_ADDR_SPACE_SIZE; + + return ret; +} +#endif /* ENABLE_ASSERTIONS*/ + +bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx) +{ + if (ctx->xlat_regime == EL1_EL0_REGIME) { + assert(xlat_arch_current_el() >= 1U); + return (read_sctlr_el1() & SCTLR_M_BIT) != 0U; + } else if (ctx->xlat_regime == EL2_REGIME) { + assert(xlat_arch_current_el() >= 2U); + return (read_sctlr_el2() & SCTLR_M_BIT) != 0U; + } else { + assert(ctx->xlat_regime == EL3_REGIME); + assert(xlat_arch_current_el() >= 3U); + return (read_sctlr_el3() & SCTLR_M_BIT) != 0U; + } +} + +bool is_dcache_enabled(void) +{ + unsigned int el = get_current_el_maybe_constant(); + + if (el == 1U) { + return (read_sctlr_el1() & SCTLR_C_BIT) != 0U; + } else if (el == 2U) { + return (read_sctlr_el2() & SCTLR_C_BIT) != 0U; + } else { + return (read_sctlr_el3() & SCTLR_C_BIT) != 0U; + } +} + +uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime) +{ + if (xlat_regime == EL1_EL0_REGIME) { + return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN); + } else { + assert((xlat_regime == EL2_REGIME) || + (xlat_regime == EL3_REGIME)); + return UPPER_ATTRS(XN); + } +} + +void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime) +{ + /* + * Ensure the translation table write has drained into memory before + * invalidating the TLB entry. + */ + dsbishst(); + + /* + * This function only supports invalidation of TLB entries for the EL3 + * and EL1&0 translation regimes. + * + * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher + * exception level (see section D4.9.2 of the ARM ARM rev B.a). + */ + if (xlat_regime == EL1_EL0_REGIME) { + assert(xlat_arch_current_el() >= 1U); + tlbivaae1is(TLBI_ADDR(va)); + } else if (xlat_regime == EL2_REGIME) { + assert(xlat_arch_current_el() >= 2U); + tlbivae2is(TLBI_ADDR(va)); + } else { + assert(xlat_regime == EL3_REGIME); + assert(xlat_arch_current_el() >= 3U); + tlbivae3is(TLBI_ADDR(va)); + } +} + +void xlat_arch_tlbi_va_sync(void) +{ + /* + * A TLB maintenance instruction can complete at any time after + * it is issued, but is only guaranteed to be complete after the + * execution of DSB by the PE that executed the TLB maintenance + * instruction. After the TLB invalidate instruction is + * complete, no new memory accesses using the invalidated TLB + * entries will be observed by any observer of the system + * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph + * "Ordering and completion of TLB maintenance instructions". + */ + dsbish(); + + /* + * The effects of a completed TLB maintenance instruction are + * only guaranteed to be visible on the PE that executed the + * instruction after the execution of an ISB instruction by the + * PE that executed the TLB maintenance instruction. 
+ */ + isb(); +} + +unsigned int xlat_arch_current_el(void) +{ + unsigned int el = (unsigned int)GET_EL(read_CurrentEl()); + + assert(el > 0U); + + return el; +} + +void setup_mmu_cfg(uint64_t *params, unsigned int flags, + const uint64_t *base_table, unsigned long long max_pa, + uintptr_t max_va, int xlat_regime) +{ + uint64_t mair, ttbr0, tcr; + uintptr_t virtual_addr_space_size; + + /* Set attributes in the right indices of the MAIR. */ + mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); + mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX); + mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX); + + /* + * Limit the input address ranges and memory region sizes translated + * using TTBR0 to the given virtual address space size. + */ + assert(max_va < ((uint64_t)UINTPTR_MAX)); + + virtual_addr_space_size = (uintptr_t)max_va + 1U; + + assert(virtual_addr_space_size >= + xlat_get_min_virt_addr_space_size()); + assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE); + assert(IS_POWER_OF_TWO(virtual_addr_space_size)); + + /* + * __builtin_ctzll(0) is undefined but here we are guaranteed that + * virtual_addr_space_size is in the range [1,UINTPTR_MAX]. + */ + int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size); + + tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT; + + /* + * Set the cacheability and shareability attributes for memory + * associated with translation table walks. + */ + if ((flags & XLAT_TABLE_NC) != 0U) { + /* Inner & outer non-cacheable non-shareable. */ + tcr |= TCR_SH_NON_SHAREABLE | + TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC; + } else { + /* Inner & outer WBWA & shareable. */ + tcr |= TCR_SH_INNER_SHAREABLE | + TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA; + } + + /* + * It is safer to restrict the max physical address accessible by the + * hardware as much as possible. + */ + unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa); + + if (xlat_regime == EL1_EL0_REGIME) { + /* + * TCR_EL1.EPD1: Disable translation table walk for addresses + * that are translated using TTBR1_EL1. + */ + tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT); + } else if (xlat_regime == EL2_REGIME) { + tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT); + } else { + assert(xlat_regime == EL3_REGIME); + tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT); + } + + /* Set TTBR bits as well */ + ttbr0 = (uint64_t) base_table; + + if (is_armv8_2_ttcnp_present()) { + /* Enable CnP bit so as to share page tables with all PEs. */ + ttbr0 |= TTBR_CNP_BIT; + } + + params[MMU_CFG_MAIR] = mair; + params[MMU_CFG_TCR] = tcr; + params[MMU_CFG_TTBR0] = ttbr0; +} diff --git a/lib/xlat_tables_v2/ro_xlat_tables.mk b/lib/xlat_tables_v2/ro_xlat_tables.mk new file mode 100644 index 0000000..fb8a426 --- /dev/null +++ b/lib/xlat_tables_v2/ro_xlat_tables.mk @@ -0,0 +1,41 @@ +# +# Copyright (c) 2020-2022, ARM Limited. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +ifeq (${USE_DEBUGFS}, 1) + $(error "Debugfs requires functionality from the dynamic translation \ + library and is incompatible with ALLOW_RO_XLAT_TABLES.") +endif + +ifeq (${ARCH},aarch32) + ifeq (${RESET_TO_SP_MIN},1) + $(error "RESET_TO_SP_MIN requires functionality from the dynamic \ + translation library and is incompatible with \ + ALLOW_RO_XLAT_TABLES.") + endif +else # if AArch64 + ifeq (${PLAT},tegra) + $(error "Tegra requires functionality from the dynamic translation \ + library and is incompatible with ALLOW_RO_XLAT_TABLES.") + endif + ifeq (${RESET_TO_BL31},1) + $(error "RESET_TO_BL31 requires functionality from the dynamic \ + translation library and is incompatible with \ + ALLOW_RO_XLAT_TABLES.") + endif + ifeq (${SPD},trusty) + $(error "Trusty requires functionality from the dynamic translation \ + library and is incompatible with ALLOW_RO_XLAT_TABLES.") + endif + ifeq (${SPM_MM},1) + $(error "SPM_MM requires functionality to change memory region \ + attributes, which is not possible once the translation tables \ + have been made read-only.") + endif + ifeq (${SPMC_AT_EL3},1) + $(error "EL3 SPMC requires functionality from the dynamic translation \ + library and is incompatible with ALLOW_RO_XLAT_TABLES.") + endif +endif diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk new file mode 100644 index 0000000..bcc3e68 --- /dev/null +++ b/lib/xlat_tables_v2/xlat_tables.mk @@ -0,0 +1,19 @@ +# +# Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \ + ${ARCH}/enable_mmu.S \ + ${ARCH}/xlat_tables_arch.c \ + xlat_tables_context.c \ + xlat_tables_core.c \ + xlat_tables_utils.c) + +XLAT_TABLES_LIB_V2 := 1 +$(eval $(call add_define,XLAT_TABLES_LIB_V2)) + +ifeq (${ALLOW_RO_XLAT_TABLES}, 1) + include lib/xlat_tables_v2/ro_xlat_tables.mk +endif diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c new file mode 100644 index 0000000..95dae88 --- /dev/null +++ b/lib/xlat_tables_v2/xlat_tables_context.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <assert.h> + +#include <platform_def.h> + +#include <common/debug.h> +#include <lib/xlat_tables/xlat_tables_defs.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "xlat_tables_private.h" + +/* + * MMU configuration register values for the active translation context. Used + * from the MMU assembly helpers. + */ +uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX]; + +/* + * Allocate and initialise the default translation context for the BL image + * currently executing. 
+ */ +REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES, + PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE); + +void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size, + unsigned int attr) +{ + mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr); + + mmap_add_region_ctx(&tf_xlat_ctx, &mm); +} + +void mmap_add(const mmap_region_t *mm) +{ + mmap_add_ctx(&tf_xlat_ctx, mm); +} + +void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va, + size_t size, unsigned int attr) +{ + mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr); + + mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm); + + *base_va = mm.base_va; +} + +void mmap_add_alloc_va(mmap_region_t *mm) +{ + while (mm->granularity != 0U) { + assert(mm->base_va == 0U); + mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm); + mm++; + } +} + +#if PLAT_XLAT_TABLES_DYNAMIC + +int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va, + size_t size, unsigned int attr) +{ + mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr); + + return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm); +} + +int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa, + uintptr_t *base_va, size_t size, + unsigned int attr) +{ + mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr); + + int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm); + + *base_va = mm.base_va; + + return rc; +} + + +int mmap_remove_dynamic_region(uintptr_t base_va, size_t size) +{ + return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, + base_va, size); +} + +#endif /* PLAT_XLAT_TABLES_DYNAMIC */ + +void __init init_xlat_tables(void) +{ + assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID); + + unsigned int current_el = xlat_arch_current_el(); + + if (current_el == 1U) { + tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME; + } else if (current_el == 2U) { + tf_xlat_ctx.xlat_regime = EL2_REGIME; + } else { + assert(current_el == 3U); + tf_xlat_ctx.xlat_regime = EL3_REGIME; + } + + init_xlat_tables_ctx(&tf_xlat_ctx); +} + +int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr) +{ + return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr); +} + +int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr) +{ + return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr); +} + +#if PLAT_RO_XLAT_TABLES +/* Change the memory attributes of the descriptors which resolve the address + * range that belongs to the translation tables themselves, which are by default + * mapped as part of read-write data in the BL image's memory. + * + * Since the translation tables map themselves via these level 3 (page) + * descriptors, any change applied to them with the MMU on would introduce a + * chicken and egg problem because of the break-before-make sequence. + * Eventually, it would reach the descriptor that resolves the very table it + * belongs to and the invalidation (break step) would cause the subsequent write + * (make step) to it to generate an MMU fault. Therefore, the MMU is disabled + * before making the change. + * + * No assumption is made about what data this function needs, therefore all the + * caches are flushed in order to ensure coherency. A future optimization would + * be to only flush the required data to main memory. 
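 * This function is only built when PLAT_RO_XLAT_TABLES is enabled. A BL image
 * would typically call it once, after init_xlat_tables() and the relevant
 * enable_mmu_*() call, at the point where no further mapping changes are
 * expected; the exact call site is platform-specific and outside this patch.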
+ */ +int xlat_make_tables_readonly(void) +{ + assert(tf_xlat_ctx.initialized == true); +#ifdef __aarch64__ + if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) { + disable_mmu_el1(); + } else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) { + disable_mmu_el3(); + } else { + assert(tf_xlat_ctx.xlat_regime == EL2_REGIME); + return -1; + } + + /* Flush all caches. */ + dcsw_op_all(DCCISW); +#else /* !__aarch64__ */ + assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME); + /* On AArch32, we flush the caches before disabling the MMU. The reason + * for this is that the dcsw_op_all AArch32 function pushes some + * registers onto the stack under the assumption that it is writing to + * cache, which is not true with the MMU off. This would result in the + * stack becoming corrupted and a wrong/junk value for the LR being + * restored at the end of the routine. + */ + dcsw_op_all(DC_OP_CISW); + disable_mmu_secure(); +#endif + + int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx, + (uintptr_t)tf_xlat_ctx.tables, + tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE, + MT_RO_DATA | MT_SECURE); + +#ifdef __aarch64__ + if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) { + enable_mmu_el1(0U); + } else { + assert(tf_xlat_ctx.xlat_regime == EL3_REGIME); + enable_mmu_el3(0U); + } +#else /* !__aarch64__ */ + enable_mmu_svc_mon(0U); +#endif + + if (rc == 0) { + tf_xlat_ctx.readonly_tables = true; + } + + return rc; +} +#endif /* PLAT_RO_XLAT_TABLES */ + +/* + * If dynamic allocation of new regions is disabled then by the time we call the + * function enabling the MMU, we'll have registered all the memory regions to + * map for the system's lifetime. Therefore, at this point we know the maximum + * physical address that will ever be mapped. + * + * If dynamic allocation is enabled then we can't make any such assumption + * because the maximum physical address could get pushed while adding a new + * region. Therefore, in this case we have to assume that the whole address + * space size might be mapped. 
+ */ +#ifdef PLAT_XLAT_TABLES_DYNAMIC +#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address +#else +#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa +#endif + +#ifdef __aarch64__ + +void enable_mmu_el1(unsigned int flags) +{ + setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, + tf_xlat_ctx.base_table, MAX_PHYS_ADDR, + tf_xlat_ctx.va_max_address, EL1_EL0_REGIME); + enable_mmu_direct_el1(flags); +} + +void enable_mmu_el2(unsigned int flags) +{ + setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, + tf_xlat_ctx.base_table, MAX_PHYS_ADDR, + tf_xlat_ctx.va_max_address, EL2_REGIME); + enable_mmu_direct_el2(flags); +} + +void enable_mmu_el3(unsigned int flags) +{ + setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, + tf_xlat_ctx.base_table, MAX_PHYS_ADDR, + tf_xlat_ctx.va_max_address, EL3_REGIME); + enable_mmu_direct_el3(flags); +} + +void enable_mmu(unsigned int flags) +{ + switch (get_current_el_maybe_constant()) { + case 1: + enable_mmu_el1(flags); + break; + case 2: + enable_mmu_el2(flags); + break; + case 3: + enable_mmu_el3(flags); + break; + default: + panic(); + } +} + +#else /* !__aarch64__ */ + +void enable_mmu_svc_mon(unsigned int flags) +{ + setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, + tf_xlat_ctx.base_table, MAX_PHYS_ADDR, + tf_xlat_ctx.va_max_address, EL1_EL0_REGIME); + enable_mmu_direct_svc_mon(flags); +} + +void enable_mmu_hyp(unsigned int flags) +{ + setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, + tf_xlat_ctx.base_table, MAX_PHYS_ADDR, + tf_xlat_ctx.va_max_address, EL2_REGIME); + enable_mmu_direct_hyp(flags); +} + +#endif /* __aarch64__ */ diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c new file mode 100644 index 0000000..de57184 --- /dev/null +++ b/lib/xlat_tables_v2/xlat_tables_core.c @@ -0,0 +1,1244 @@ +/* + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <stdbool.h> +#include <stdint.h> +#include <string.h> + +#include <platform_def.h> + +#include <arch_features.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_defs.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "xlat_tables_private.h" + +/* Helper function that cleans the data cache only if it is enabled. */ +static inline __attribute__((unused)) void xlat_clean_dcache_range(uintptr_t addr, size_t size) +{ + if (is_dcache_enabled()) + clean_dcache_range(addr, size); +} + +#if PLAT_XLAT_TABLES_DYNAMIC + +/* + * The following functions assume that they will be called using subtables only. + * The base table can't be unmapped, so it is not needed to do any special + * handling for it. + */ + +/* + * Returns the index of the array corresponding to the specified translation + * table. + */ +static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table) +{ + for (int i = 0; i < ctx->tables_num; i++) + if (ctx->tables[i] == table) + return i; + + /* + * Maybe we were asked to get the index of the base level table, which + * should never happen. + */ + assert(false); + + return -1; +} + +/* Returns a pointer to an empty translation table. */ +static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx) +{ + for (int i = 0; i < ctx->tables_num; i++) + if (ctx->tables_mapped_regions[i] == 0) + return ctx->tables[i]; + + return NULL; +} + +/* Increments region count for a given table. 
*/ +static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx, + const uint64_t *table) +{ + int idx = xlat_table_get_index(ctx, table); + + ctx->tables_mapped_regions[idx]++; +} + +/* Decrements region count for a given table. */ +static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx, + const uint64_t *table) +{ + int idx = xlat_table_get_index(ctx, table); + + ctx->tables_mapped_regions[idx]--; +} + +/* Returns 0 if the specified table isn't empty, otherwise 1. */ +static bool xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table) +{ + return ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0; +} + +#else /* PLAT_XLAT_TABLES_DYNAMIC */ + +/* Returns a pointer to the first empty translation table. */ +static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx) +{ + assert(ctx->next_table < ctx->tables_num); + + return ctx->tables[ctx->next_table++]; +} + +#endif /* PLAT_XLAT_TABLES_DYNAMIC */ + +/* + * Returns a block/page table descriptor for the given level and attributes. + */ +uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr, + unsigned long long addr_pa, unsigned int level) +{ + uint64_t desc; + uint32_t mem_type; + uint32_t shareability_type; + + /* Make sure that the granularity is fine enough to map this address. */ + assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U); + + desc = addr_pa; + /* + * There are different translation table descriptors for level 3 and the + * rest. + */ + desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC; + /* + * Always set the access flag, as this library assumes access flag + * faults aren't managed. + */ + desc |= LOWER_ATTRS(ACCESS_FLAG); + + /* Determine the physical address space this region belongs to. */ + desc |= xlat_arch_get_pas(attr); + + /* + * Deduce other fields of the descriptor based on the MT_RW memory + * region attributes. + */ + desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO); + + /* + * Do not allow unprivileged access when the mapping is for a privileged + * EL. For translation regimes that do not have mappings for access for + * lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED. + */ + if (ctx->xlat_regime == EL1_EL0_REGIME) { + if ((attr & MT_USER) != 0U) { + /* EL0 mapping requested, so we give User access */ + desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED); + } else { + /* EL1 mapping requested, no User access granted */ + desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED); + } + } else { + assert((ctx->xlat_regime == EL2_REGIME) || + (ctx->xlat_regime == EL3_REGIME)); + desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1); + } + + /* + * Deduce shareability domain and executability of the memory region + * from the memory type of the attributes (MT_TYPE). + * + * Data accesses to device memory and non-cacheable normal memory are + * coherent for all observers in the system, and correspondingly are + * always treated as being Outer Shareable. Therefore, for these 2 types + * of memory, it is not strictly needed to set the shareability field + * in the translation tables. + */ + mem_type = MT_TYPE(attr); + if (mem_type == MT_DEVICE) { + desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH); + /* + * Always map device memory as execute-never. + * This is to avoid the possibility of a speculative instruction + * fetch, which could be an issue if this memory region + * corresponds to a read-sensitive peripheral. 
+ */ + desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime); + + } else { /* Normal memory */ + /* + * Always map read-write normal memory as execute-never. + * This library assumes that it is used by software that does + * not self-modify its code, therefore R/W memory is reserved + * for data storage, which must not be executable. + * + * Note that setting the XN bit here is for consistency only. + * The function that enables the MMU sets the SCTLR_ELx.WXN bit, + * which makes any writable memory region to be treated as + * execute-never, regardless of the value of the XN bit in the + * translation table. + * + * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER + * attribute to figure out the value of the XN bit. The actual + * XN bit(s) to set in the descriptor depends on the context's + * translation regime and the policy applied in + * xlat_arch_regime_get_xn_desc(). + */ + if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) { + desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime); + } + + shareability_type = MT_SHAREABILITY(attr); + if (mem_type == MT_MEMORY) { + desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX); + if (shareability_type == MT_SHAREABILITY_NSH) { + desc |= LOWER_ATTRS(NSH); + } else if (shareability_type == MT_SHAREABILITY_OSH) { + desc |= LOWER_ATTRS(OSH); + } else { + desc |= LOWER_ATTRS(ISH); + } + + /* Check if Branch Target Identification is enabled */ +#if ENABLE_BTI + /* Set GP bit for block and page code entries + * if BTI mechanism is implemented. + */ + if (is_armv8_5_bti_present() && + ((attr & (MT_TYPE_MASK | MT_RW | + MT_EXECUTE_NEVER)) == MT_CODE)) { + desc |= GP; + } +#endif + } else { + assert(mem_type == MT_NON_CACHEABLE); + desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH); + } + } + + return desc; +} + +/* + * Enumeration of actions that can be made when mapping table entries depending + * on the previous value in that entry and information about the region being + * mapped. + */ +typedef enum { + + /* Do nothing */ + ACTION_NONE, + + /* Write a block (or page, if in level 3) entry. */ + ACTION_WRITE_BLOCK_ENTRY, + + /* + * Create a new table and write a table entry pointing to it. Recurse + * into it for further processing. + */ + ACTION_CREATE_NEW_TABLE, + + /* + * There is a table descriptor in this entry, read it and recurse into + * that table for further processing. + */ + ACTION_RECURSE_INTO_TABLE, + +} action_t; + +/* + * Function that returns the first VA of the table affected by the specified + * mmap region. + */ +static uintptr_t xlat_tables_find_start_va(mmap_region_t *mm, + const uintptr_t table_base_va, + const unsigned int level) +{ + uintptr_t table_idx_va; + + if (mm->base_va > table_base_va) { + /* Find the first index of the table affected by the region. */ + table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level); + } else { + /* Start from the beginning of the table. */ + table_idx_va = table_base_va; + } + + return table_idx_va; +} + +/* + * Function that returns table index for the given VA and level arguments. + */ +static inline unsigned int xlat_tables_va_to_index(const uintptr_t table_base_va, + const uintptr_t va, + const unsigned int level) +{ + return (unsigned int)((va - table_base_va) >> XLAT_ADDR_SHIFT(level)); +} + +#if PLAT_XLAT_TABLES_DYNAMIC + +/* + * From the given arguments, it decides which action to take when unmapping the + * specified region. 
+ */ +static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm, + const uintptr_t table_idx_va, const uintptr_t table_idx_end_va, + const unsigned int level, const uint64_t desc_type) +{ + action_t action; + uintptr_t region_end_va = mm->base_va + mm->size - 1U; + + if ((mm->base_va <= table_idx_va) && + (region_end_va >= table_idx_end_va)) { + /* Region covers all block */ + + if (level == 3U) { + /* + * Last level, only page descriptors allowed, + * erase it. + */ + assert(desc_type == PAGE_DESC); + + action = ACTION_WRITE_BLOCK_ENTRY; + } else { + /* + * Other levels can have table descriptors. If + * so, recurse into it and erase descriptors + * inside it as needed. If there is a block + * descriptor, just erase it. If an invalid + * descriptor is found, this table isn't + * actually mapped, which shouldn't happen. + */ + if (desc_type == TABLE_DESC) { + action = ACTION_RECURSE_INTO_TABLE; + } else { + assert(desc_type == BLOCK_DESC); + action = ACTION_WRITE_BLOCK_ENTRY; + } + } + + } else if ((mm->base_va <= table_idx_end_va) || + (region_end_va >= table_idx_va)) { + /* + * Region partially covers block. + * + * It can't happen in level 3. + * + * There must be a table descriptor here, if not there + * was a problem when mapping the region. + */ + assert(level < 3U); + assert(desc_type == TABLE_DESC); + + action = ACTION_RECURSE_INTO_TABLE; + } else { + /* The region doesn't cover the block at all */ + action = ACTION_NONE; + } + + return action; +} +/* + * Recursive function that writes to the translation tables and unmaps the + * specified region. + */ +static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm, + const uintptr_t table_base_va, + uint64_t *const table_base, + const unsigned int table_entries, + const unsigned int level) +{ + assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX)); + + uint64_t *subtable; + uint64_t desc; + + uintptr_t table_idx_va; + uintptr_t table_idx_end_va; /* End VA of this entry */ + + uintptr_t region_end_va = mm->base_va + mm->size - 1U; + + unsigned int table_idx; + + table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level); + table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level); + + while (table_idx < table_entries) { + + table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U; + + desc = table_base[table_idx]; + uint64_t desc_type = desc & DESC_MASK; + + action_t action = xlat_tables_unmap_region_action(mm, + table_idx_va, table_idx_end_va, level, + desc_type); + + if (action == ACTION_WRITE_BLOCK_ENTRY) { + + table_base[table_idx] = INVALID_DESC; + xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime); + + } else if (action == ACTION_RECURSE_INTO_TABLE) { + + subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK); + + /* Recurse to write into subtable */ + xlat_tables_unmap_region(ctx, mm, table_idx_va, + subtable, XLAT_TABLE_ENTRIES, + level + 1U); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)subtable, + XLAT_TABLE_ENTRIES * sizeof(uint64_t)); +#endif + /* + * If the subtable is now empty, remove its reference. 
+ */ + if (xlat_table_is_empty(ctx, subtable)) { + table_base[table_idx] = INVALID_DESC; + xlat_arch_tlbi_va(table_idx_va, + ctx->xlat_regime); + } + + } else { + assert(action == ACTION_NONE); + } + + table_idx++; + table_idx_va += XLAT_BLOCK_SIZE(level); + + /* If reached the end of the region, exit */ + if (region_end_va <= table_idx_va) + break; + } + + if (level > ctx->base_level) + xlat_table_dec_regions_count(ctx, table_base); +} + +#endif /* PLAT_XLAT_TABLES_DYNAMIC */ + +/* + * From the given arguments, it decides which action to take when mapping the + * specified region. + */ +static action_t xlat_tables_map_region_action(const mmap_region_t *mm, + unsigned int desc_type, unsigned long long dest_pa, + uintptr_t table_entry_base_va, unsigned int level) +{ + uintptr_t mm_end_va = mm->base_va + mm->size - 1U; + uintptr_t table_entry_end_va = + table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U; + + /* + * The descriptor types allowed depend on the current table level. + */ + + if ((mm->base_va <= table_entry_base_va) && + (mm_end_va >= table_entry_end_va)) { + + /* + * Table entry is covered by region + * -------------------------------- + * + * This means that this table entry can describe the whole + * translation with this granularity in principle. + */ + + if (level == 3U) { + /* + * Last level, only page descriptors are allowed. + */ + if (desc_type == PAGE_DESC) { + /* + * There's another region mapped here, don't + * overwrite. + */ + return ACTION_NONE; + } else { + assert(desc_type == INVALID_DESC); + return ACTION_WRITE_BLOCK_ENTRY; + } + + } else { + + /* + * Other levels. Table descriptors are allowed. Block + * descriptors too, but they have some limitations. + */ + + if (desc_type == TABLE_DESC) { + /* There's already a table, recurse into it. */ + return ACTION_RECURSE_INTO_TABLE; + + } else if (desc_type == INVALID_DESC) { + /* + * There's nothing mapped here, create a new + * entry. + * + * Check if the destination granularity allows + * us to use a block descriptor or we need a + * finer table for it. + * + * Also, check if the current level allows block + * descriptors. If not, create a table instead. + */ + if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U) + || (level < MIN_LVL_BLOCK_DESC) || + (mm->granularity < XLAT_BLOCK_SIZE(level))) + return ACTION_CREATE_NEW_TABLE; + else + return ACTION_WRITE_BLOCK_ENTRY; + + } else { + /* + * There's another region mapped here, don't + * overwrite. + */ + assert(desc_type == BLOCK_DESC); + + return ACTION_NONE; + } + } + + } else if ((mm->base_va <= table_entry_end_va) || + (mm_end_va >= table_entry_base_va)) { + + /* + * Region partially covers table entry + * ----------------------------------- + * + * This means that this table entry can't describe the whole + * translation, a finer table is needed. + + * There cannot be partial block overlaps in level 3. If that + * happens, some of the preliminary checks when adding the + * mmap region failed to detect that PA and VA must at least be + * aligned to PAGE_SIZE. + */ + assert(level < 3U); + + if (desc_type == INVALID_DESC) { + /* + * The block is not fully covered by the region. Create + * a new table, recurse into it and try to map the + * region with finer granularity. + */ + return ACTION_CREATE_NEW_TABLE; + + } else { + assert(desc_type == TABLE_DESC); + /* + * The block is not fully covered by the region, but + * there is already a table here. Recurse into it and + * try to map with finer granularity. 
+ * + * PAGE_DESC for level 3 has the same value as + * TABLE_DESC, but this code can't run on a level 3 + * table because there can't be overlaps in level 3. + */ + return ACTION_RECURSE_INTO_TABLE; + } + } else { + + /* + * This table entry is outside of the region specified in the + * arguments, don't write anything to it. + */ + return ACTION_NONE; + } +} + +/* + * Recursive function that writes to the translation tables and maps the + * specified region. On success, it returns the VA of the last byte that was + * successfully mapped. On error, it returns the VA of the next entry that + * should have been mapped. + */ +static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm, + uintptr_t table_base_va, + uint64_t *const table_base, + unsigned int table_entries, + unsigned int level) +{ + assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX)); + + uintptr_t mm_end_va = mm->base_va + mm->size - 1U; + + uintptr_t table_idx_va; + unsigned long long table_idx_pa; + + uint64_t *subtable; + uint64_t desc; + + unsigned int table_idx; + + table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level); + table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level); + +#if PLAT_XLAT_TABLES_DYNAMIC + if (level > ctx->base_level) + xlat_table_inc_regions_count(ctx, table_base); +#endif + + while (table_idx < table_entries) { + + desc = table_base[table_idx]; + + table_idx_pa = mm->base_pa + table_idx_va - mm->base_va; + + action_t action = xlat_tables_map_region_action(mm, + (uint32_t)(desc & DESC_MASK), table_idx_pa, + table_idx_va, level); + + if (action == ACTION_WRITE_BLOCK_ENTRY) { + + table_base[table_idx] = + xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa, + level); + + } else if (action == ACTION_CREATE_NEW_TABLE) { + uintptr_t end_va; + + subtable = xlat_table_get_empty(ctx); + if (subtable == NULL) { + /* Not enough free tables to map this region */ + return table_idx_va; + } + + /* Point to new subtable from this one. */ + table_base[table_idx] = + TABLE_DESC | (uintptr_t)subtable; + + /* Recurse to write into subtable */ + end_va = xlat_tables_map_region(ctx, mm, table_idx_va, + subtable, XLAT_TABLE_ENTRIES, + level + 1U); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)subtable, + XLAT_TABLE_ENTRIES * sizeof(uint64_t)); +#endif + if (end_va != + (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U)) + return end_va; + + } else if (action == ACTION_RECURSE_INTO_TABLE) { + uintptr_t end_va; + + subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK); + /* Recurse to write into subtable */ + end_va = xlat_tables_map_region(ctx, mm, table_idx_va, + subtable, XLAT_TABLE_ENTRIES, + level + 1U); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)subtable, + XLAT_TABLE_ENTRIES * sizeof(uint64_t)); +#endif + if (end_va != + (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U)) + return end_va; + + } else { + + assert(action == ACTION_NONE); + + } + + table_idx++; + table_idx_va += XLAT_BLOCK_SIZE(level); + + /* If reached the end of the region, exit */ + if (mm_end_va <= table_idx_va) + break; + } + + return table_idx_va - 1U; +} + +/* + * Function that verifies that a region can be mapped. + * Returns: + * 0: Success, the mapping is allowed. + * EINVAL: Invalid values were used as arguments. + * ERANGE: The memory limits were surpassed. + * ENOMEM: There is not enough memory in the mmap array. + * EPERM: Region overlaps another one in an invalid way. 
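 * As an example of the EPERM case: a new region whose VA range fully overlaps
 * an existing one is only accepted when both regions are static and share the
 * same VA-to-PA offset, and even then it must not be the exact same area;
 * partial VA or PA overlaps are always rejected.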
+ */ +static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm) +{ + unsigned long long base_pa = mm->base_pa; + uintptr_t base_va = mm->base_va; + size_t size = mm->size; + size_t granularity = mm->granularity; + + unsigned long long end_pa = base_pa + size - 1U; + uintptr_t end_va = base_va + size - 1U; + + if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) || + !IS_PAGE_ALIGNED(size)) + return -EINVAL; + + if ((granularity != XLAT_BLOCK_SIZE(1U)) && + (granularity != XLAT_BLOCK_SIZE(2U)) && + (granularity != XLAT_BLOCK_SIZE(3U))) { + return -EINVAL; + } + + /* Check for overflows */ + if ((base_pa > end_pa) || (base_va > end_va)) + return -ERANGE; + + if (end_va > ctx->va_max_address) + return -ERANGE; + + if (end_pa > ctx->pa_max_address) + return -ERANGE; + + /* Check that there is space in the ctx->mmap array */ + if (ctx->mmap[ctx->mmap_num - 1].size != 0U) + return -ENOMEM; + + /* Check for PAs and VAs overlaps with all other regions */ + for (const mmap_region_t *mm_cursor = ctx->mmap; + mm_cursor->size != 0U; ++mm_cursor) { + + uintptr_t mm_cursor_end_va = mm_cursor->base_va + + mm_cursor->size - 1U; + + /* + * Check if one of the regions is completely inside the other + * one. + */ + bool fully_overlapped_va = + ((base_va >= mm_cursor->base_va) && + (end_va <= mm_cursor_end_va)) || + ((mm_cursor->base_va >= base_va) && + (mm_cursor_end_va <= end_va)); + + /* + * Full VA overlaps are only allowed if both regions are + * identity mapped (zero offset) or have the same VA to PA + * offset. Also, make sure that it's not the exact same area. + * This can only be done with static regions. + */ + if (fully_overlapped_va) { + +#if PLAT_XLAT_TABLES_DYNAMIC + if (((mm->attr & MT_DYNAMIC) != 0U) || + ((mm_cursor->attr & MT_DYNAMIC) != 0U)) + return -EPERM; +#endif /* PLAT_XLAT_TABLES_DYNAMIC */ + if ((mm_cursor->base_va - mm_cursor->base_pa) != + (base_va - base_pa)) + return -EPERM; + + if ((base_va == mm_cursor->base_va) && + (size == mm_cursor->size)) + return -EPERM; + + } else { + /* + * If the regions do not have fully overlapping VAs, + * then they must have fully separated VAs and PAs. + * Partial overlaps are not allowed + */ + + unsigned long long mm_cursor_end_pa = + mm_cursor->base_pa + mm_cursor->size - 1U; + + bool separated_pa = (end_pa < mm_cursor->base_pa) || + (base_pa > mm_cursor_end_pa); + bool separated_va = (end_va < mm_cursor->base_va) || + (base_va > mm_cursor_end_va); + + if (!separated_va || !separated_pa) + return -EPERM; + } + } + + return 0; +} + +void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm) +{ + mmap_region_t *mm_cursor = ctx->mmap, *mm_destination; + const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num; + const mmap_region_t *mm_last; + unsigned long long end_pa = mm->base_pa + mm->size - 1U; + uintptr_t end_va = mm->base_va + mm->size - 1U; + int ret; + + /* Ignore empty regions */ + if (mm->size == 0U) + return; + + /* Static regions must be added before initializing the xlat tables. */ + assert(!ctx->initialized); + + ret = mmap_add_region_check(ctx, mm); + if (ret != 0) { + ERROR("mmap_add_region_check() failed. error %d\n", ret); + assert(false); + return; + } + + /* + * Find correct place in mmap to insert new region. + * + * 1 - Lower region VA end first. + * 2 - Smaller region size first. 
+ * + * VA 0 0xFF + * + * 1st |------| + * 2nd |------------| + * 3rd |------| + * 4th |---| + * 5th |---| + * 6th |----------| + * 7th |-------------------------------------| + * + * This is required for overlapping regions only. It simplifies adding + * regions with the loop in xlat_tables_init_internal because the outer + * ones won't overwrite block or page descriptors of regions added + * previously. + * + * Overlapping is only allowed for static regions. + */ + + while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va) + && (mm_cursor->size != 0U)) { + ++mm_cursor; + } + + while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) && + (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) { + ++mm_cursor; + } + + /* + * Find the last entry marker in the mmap + */ + mm_last = ctx->mmap; + while ((mm_last->size != 0U) && (mm_last < mm_end)) { + ++mm_last; + } + + /* + * Check if we have enough space in the memory mapping table. + * This shouldn't happen as we have checked in mmap_add_region_check + * that there is free space. + */ + assert(mm_last->size == 0U); + + /* Make room for new region by moving other regions up by one place */ + mm_destination = mm_cursor + 1; + (void)memmove(mm_destination, mm_cursor, + (uintptr_t)mm_last - (uintptr_t)mm_cursor); + + /* + * Check we haven't lost the empty sentinel from the end of the array. + * This shouldn't happen as we have checked in mmap_add_region_check + * that there is free space. + */ + assert(mm_end->size == 0U); + + *mm_cursor = *mm; + + if (end_pa > ctx->max_pa) + ctx->max_pa = end_pa; + if (end_va > ctx->max_va) + ctx->max_va = end_va; +} + +/* + * Determine the table level closest to the initial lookup level that + * can describe this translation. Then, align base VA to the next block + * at the determined level. + */ +static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm) +{ + /* + * By or'ing the size and base PA the alignment will be the one + * corresponding to the smallest boundary of the two of them. + * + * There are three different cases. For example (for 4 KiB page size): + * + * +--------------+------------------++--------------+ + * | PA alignment | Size multiple of || VA alignment | + * +--------------+------------------++--------------+ + * | 2 MiB | 2 MiB || 2 MiB | (1) + * | 2 MiB | 4 KiB || 4 KiB | (2) + * | 4 KiB | 2 MiB || 4 KiB | (3) + * +--------------+------------------++--------------+ + * + * - In (1), it is possible to take advantage of the alignment of the PA + * and the size of the region to use a level 2 translation table + * instead of a level 3 one. + * + * - In (2), the size is smaller than a block entry of level 2, so it is + * needed to use a level 3 table to describe the region or the library + * will map more memory than the desired one. + * + * - In (3), even though the region has the size of one level 2 block + * entry, it isn't possible to describe the translation with a level 2 + * block entry because of the alignment of the base PA. + * + * Only bits 47:21 of a level 2 block descriptor are used by the MMU, + * bits 20:0 of the resulting address are 0 in this case. Because of + * this, the PA generated as result of this translation is aligned to + * 2 MiB. The PA that was requested to be mapped is aligned to 4 KiB, + * though, which means that the resulting translation is incorrect. + * The only way to prevent this is by using a finer granularity. 
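 * As an illustrative example of case (2) with a 4 KiB granule: a 2 MiB-aligned
 * base_pa combined with a 4 KiB size leaves bit 12 set in align_check below,
 * so the level 2 check fails and the allocated VA keeps only its implicit
 * 4 KiB (level 3) alignment. Only when both the PA and the size are 2 MiB
 * multiples, as in case (1), is base_va rounded up to a 2 MiB boundary so that
 * a level 2 block descriptor can be used.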
+ */ + unsigned long long align_check; + + align_check = mm->base_pa | (unsigned long long)mm->size; + + /* + * Assume it is always aligned to level 3. There's no need to check that + * level because its block size is PAGE_SIZE. The checks to verify that + * the addresses and size are aligned to PAGE_SIZE are inside + * mmap_add_region. + */ + for (unsigned int level = ctx->base_level; level <= 2U; ++level) { + + if ((align_check & XLAT_BLOCK_MASK(level)) != 0U) + continue; + + mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level)); + return; + } +} + +void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm) +{ + mm->base_va = ctx->max_va + 1UL; + + assert(mm->size > 0U); + + mmap_alloc_va_align_ctx(ctx, mm); + + /* Detect overflows. More checks are done in mmap_add_region_check(). */ + assert(mm->base_va > ctx->max_va); + + mmap_add_region_ctx(ctx, mm); +} + +void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm) +{ + const mmap_region_t *mm_cursor = mm; + + while (mm_cursor->granularity != 0U) { + mmap_add_region_ctx(ctx, mm_cursor); + mm_cursor++; + } +} + +#if PLAT_XLAT_TABLES_DYNAMIC + +int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm) +{ + mmap_region_t *mm_cursor = ctx->mmap; + const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num; + unsigned long long end_pa = mm->base_pa + mm->size - 1U; + uintptr_t end_va = mm->base_va + mm->size - 1U; + int ret; + + /* Nothing to do */ + if (mm->size == 0U) + return 0; + + /* Now this region is a dynamic one */ + mm->attr |= MT_DYNAMIC; + + ret = mmap_add_region_check(ctx, mm); + if (ret != 0) + return ret; + + /* + * Find the adequate entry in the mmap array in the same way done for + * static regions in mmap_add_region_ctx(). + */ + + while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va) + && (mm_cursor->size != 0U)) { + ++mm_cursor; + } + + while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) && + (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) { + ++mm_cursor; + } + + /* Make room for new region by moving other regions up by one place */ + (void)memmove(mm_cursor + 1U, mm_cursor, + (uintptr_t)mm_last - (uintptr_t)mm_cursor); + + /* + * Check we haven't lost the empty sentinal from the end of the array. + * This shouldn't happen as we have checked in mmap_add_region_check + * that there is free space. + */ + assert(mm_last->size == 0U); + + *mm_cursor = *mm; + + /* + * Update the translation tables if the xlat tables are initialized. If + * not, this region will be mapped when they are initialized. + */ + if (ctx->initialized) { + end_va = xlat_tables_map_region(ctx, mm_cursor, + 0U, ctx->base_table, ctx->base_table_entries, + ctx->base_level); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)ctx->base_table, + ctx->base_table_entries * sizeof(uint64_t)); +#endif + /* Failed to map, remove mmap entry, unmap and return error. */ + if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) { + (void)memmove(mm_cursor, mm_cursor + 1U, + (uintptr_t)mm_last - (uintptr_t)mm_cursor); + + /* + * Check if the mapping function actually managed to map + * anything. If not, just return now. + */ + if (mm->base_va >= end_va) + return -ENOMEM; + + /* + * Something went wrong after mapping some table + * entries, undo every change done up to this point. 
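+			 *
+			 * xlat_tables_map_region() returns the last VA it
+			 * managed to map, so only the range from mm->base_va
+			 * up to that address has to be unmapped again. The
+			 * temporary unmap_mm region below covers that range.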
+ */ + mmap_region_t unmap_mm = { + .base_pa = 0U, + .base_va = mm->base_va, + .size = end_va - mm->base_va, + .attr = 0U + }; + xlat_tables_unmap_region(ctx, &unmap_mm, 0U, + ctx->base_table, ctx->base_table_entries, + ctx->base_level); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)ctx->base_table, + ctx->base_table_entries * sizeof(uint64_t)); +#endif + return -ENOMEM; + } + + /* + * Make sure that all entries are written to the memory. There + * is no need to invalidate entries when mapping dynamic regions + * because new table/block/page descriptors only replace old + * invalid descriptors, that aren't TLB cached. + */ + dsbishst(); + } + + if (end_pa > ctx->max_pa) + ctx->max_pa = end_pa; + if (end_va > ctx->max_va) + ctx->max_va = end_va; + + return 0; +} + +int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm) +{ + mm->base_va = ctx->max_va + 1UL; + + if (mm->size == 0U) + return 0; + + mmap_alloc_va_align_ctx(ctx, mm); + + /* Detect overflows. More checks are done in mmap_add_region_check(). */ + if (mm->base_va < ctx->max_va) { + return -ENOMEM; + } + + return mmap_add_dynamic_region_ctx(ctx, mm); +} + +/* + * Removes the region with given base Virtual Address and size from the given + * context. + * + * Returns: + * 0: Success. + * EINVAL: Invalid values were used as arguments (region not found). + * EPERM: Tried to remove a static region. + */ +int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va, + size_t size) +{ + mmap_region_t *mm = ctx->mmap; + const mmap_region_t *mm_last = mm + ctx->mmap_num; + int update_max_va_needed = 0; + int update_max_pa_needed = 0; + + /* Check sanity of mmap array. */ + assert(mm[ctx->mmap_num].size == 0U); + + while (mm->size != 0U) { + if ((mm->base_va == base_va) && (mm->size == size)) + break; + ++mm; + } + + /* Check that the region was found */ + if (mm->size == 0U) + return -EINVAL; + + /* If the region is static it can't be removed */ + if ((mm->attr & MT_DYNAMIC) == 0U) + return -EPERM; + + /* Check if this region is using the top VAs or PAs. */ + if ((mm->base_va + mm->size - 1U) == ctx->max_va) + update_max_va_needed = 1; + if ((mm->base_pa + mm->size - 1U) == ctx->max_pa) + update_max_pa_needed = 1; + + /* Update the translation tables if needed */ + if (ctx->initialized) { + xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table, + ctx->base_table_entries, + ctx->base_level); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)ctx->base_table, + ctx->base_table_entries * sizeof(uint64_t)); +#endif + xlat_arch_tlbi_va_sync(); + } + + /* Remove this region by moving the rest down by one place. 
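+	 * The trailing zero-size sentinel is moved down together with the
+	 * other entries, so the array stays terminated and one slot becomes
+	 * free at the tail.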
*/ + (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm); + + /* Check if we need to update the max VAs and PAs */ + if (update_max_va_needed == 1) { + ctx->max_va = 0U; + mm = ctx->mmap; + while (mm->size != 0U) { + if ((mm->base_va + mm->size - 1U) > ctx->max_va) + ctx->max_va = mm->base_va + mm->size - 1U; + ++mm; + } + } + + if (update_max_pa_needed == 1) { + ctx->max_pa = 0U; + mm = ctx->mmap; + while (mm->size != 0U) { + if ((mm->base_pa + mm->size - 1U) > ctx->max_pa) + ctx->max_pa = mm->base_pa + mm->size - 1U; + ++mm; + } + } + + return 0; +} + +void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max, + uintptr_t va_max, struct mmap_region *mmap, + unsigned int mmap_num, uint64_t **tables, + unsigned int tables_num, uint64_t *base_table, + int xlat_regime, int *mapped_regions) +{ + ctx->xlat_regime = xlat_regime; + + ctx->pa_max_address = pa_max; + ctx->va_max_address = va_max; + + ctx->mmap = mmap; + ctx->mmap_num = mmap_num; + memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num); + + ctx->tables = (void *) tables; + ctx->tables_num = tables_num; + + uintptr_t va_space_size = va_max + 1; + ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size); + ctx->base_table = base_table; + ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size); + + ctx->tables_mapped_regions = mapped_regions; + + ctx->max_pa = 0; + ctx->max_va = 0; + ctx->initialized = 0; +} + +#endif /* PLAT_XLAT_TABLES_DYNAMIC */ + +void __init init_xlat_tables_ctx(xlat_ctx_t *ctx) +{ + assert(ctx != NULL); + assert(!ctx->initialized); + assert((ctx->xlat_regime == EL3_REGIME) || + (ctx->xlat_regime == EL2_REGIME) || + (ctx->xlat_regime == EL1_EL0_REGIME)); + assert(!is_mmu_enabled_ctx(ctx)); + + mmap_region_t *mm = ctx->mmap; + + assert(ctx->va_max_address >= + (xlat_get_min_virt_addr_space_size() - 1U)); + assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U)); + assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U)); + + xlat_mmap_print(mm); + + /* All tables must be zeroed before mapping any region. */ + + for (unsigned int i = 0U; i < ctx->base_table_entries; i++) + ctx->base_table[i] = INVALID_DESC; + + for (int j = 0; j < ctx->tables_num; j++) { +#if PLAT_XLAT_TABLES_DYNAMIC + ctx->tables_mapped_regions[j] = 0; +#endif + for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++) + ctx->tables[j][i] = INVALID_DESC; + } + + while (mm->size != 0U) { + uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U, + ctx->base_table, ctx->base_table_entries, + ctx->base_level); +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + xlat_clean_dcache_range((uintptr_t)ctx->base_table, + ctx->base_table_entries * sizeof(uint64_t)); +#endif + if (end_va != (mm->base_va + mm->size - 1U)) { + ERROR("Not enough memory to map region:\n" + " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n", + mm->base_va, mm->base_pa, mm->size, mm->attr); + panic(); + } + + mm++; + } + + assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa()); + assert(ctx->max_va <= ctx->va_max_address); + assert(ctx->max_pa <= ctx->pa_max_address); + + ctx->initialized = true; + + xlat_tables_print(ctx); +} diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h new file mode 100644 index 0000000..42c9a43 --- /dev/null +++ b/lib/xlat_tables_v2/xlat_tables_private.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef XLAT_TABLES_PRIVATE_H +#define XLAT_TABLES_PRIVATE_H + +#include <stdbool.h> + +#include <platform_def.h> + +#include <lib/xlat_tables/xlat_tables_defs.h> + +#if PLAT_XLAT_TABLES_DYNAMIC +/* + * Private shifts and masks to access fields of an mmap attribute + */ +/* Dynamic or static */ +#define MT_DYN_SHIFT U(31) + +/* + * Memory mapping private attributes + * + * Private attributes not exposed in the public header. + */ + +/* + * Regions mapped before the MMU can't be unmapped dynamically (they are + * static) and regions mapped with MMU enabled can be unmapped. This + * behaviour can't be overridden. + * + * Static regions can overlap each other, dynamic regions can't. + */ +#define MT_STATIC (U(0) << MT_DYN_SHIFT) +#define MT_DYNAMIC (U(1) << MT_DYN_SHIFT) + +#endif /* PLAT_XLAT_TABLES_DYNAMIC */ + +extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX]; + +/* Determine the physical address space encoded in the 'attr' parameter. */ +uint32_t xlat_arch_get_pas(uint32_t attr); + +/* + * Return the execute-never mask that will prevent instruction fetch at the + * given translation regime. + */ +uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime); + +/* + * Invalidate all TLB entries that match the given virtual address. This + * operation applies to all PEs in the same Inner Shareable domain as the PE + * that executes this function. This functions must be called for every + * translation table entry that is modified. It only affects the specified + * translation regime. + * + * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries + * pertaining to a higher exception level, e.g. invalidating EL3 entries from + * S-EL1. + */ +void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime); + +/* + * This function has to be called at the end of any code that uses the function + * xlat_arch_tlbi_va(). + */ +void xlat_arch_tlbi_va_sync(void); + +/* Print VA, PA, size and attributes of all regions in the mmap array. */ +void xlat_mmap_print(const mmap_region_t *mmap); + +/* + * Print the current state of the translation tables by reading them from + * memory. + */ +void xlat_tables_print(xlat_ctx_t *ctx); + +/* + * Returns a block/page table descriptor for the given level and attributes. + */ +uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr, + unsigned long long addr_pa, unsigned int level); + +/* + * Architecture-specific initialization code. + */ + +/* Returns the current Exception Level. The returned EL must be 1 or higher. */ +unsigned int xlat_arch_current_el(void); + +/* + * Return the maximum physical address supported by the hardware. + * This value depends on the execution state (AArch32/AArch64). + */ +unsigned long long xlat_arch_get_max_supported_pa(void); + +/* + * Returns true if the MMU of the translation regime managed by the given + * xlat_ctx_t is enabled, false otherwise. + */ +bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx); + +/* + * Returns minimum virtual address space size supported by the architecture + */ +uintptr_t xlat_get_min_virt_addr_space_size(void); + +#endif /* XLAT_TABLES_PRIVATE_H */ diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c new file mode 100644 index 0000000..38a375e --- /dev/null +++ b/lib/xlat_tables_v2/xlat_tables_utils.c @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <assert.h> +#include <errno.h> +#include <inttypes.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> + +#include <platform_def.h> + +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/utils_def.h> +#include <lib/xlat_tables/xlat_tables_defs.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "xlat_tables_private.h" + +#if LOG_LEVEL < LOG_LEVEL_VERBOSE + +void xlat_mmap_print(__unused const mmap_region_t *mmap) +{ + /* Empty */ +} + +void xlat_tables_print(__unused xlat_ctx_t *ctx) +{ + /* Empty */ +} + +#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */ + +void xlat_mmap_print(const mmap_region_t *mmap) +{ + printf("mmap:\n"); + const mmap_region_t *mm = mmap; + + while (mm->size != 0U) { + printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n", + mm->base_va, mm->base_pa, mm->size, mm->attr, + mm->granularity); + ++mm; + }; + printf("\n"); +} + +/* Print the attributes of the specified block descriptor. */ +static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc) +{ + uint64_t mem_type_index = ATTR_INDEX_GET(desc); + int xlat_regime = ctx->xlat_regime; + + if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) { + printf("MEM"); + } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) { + printf("NC"); + } else { + assert(mem_type_index == ATTR_DEVICE_INDEX); + printf("DEV"); + } + + if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) { + /* For EL3 and EL2 only check the AP[2] and XN bits. */ + printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW"); + printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC"); + } else { + assert(xlat_regime == EL1_EL0_REGIME); + /* + * For EL0 and EL1: + * - In AArch64 PXN and UXN can be set independently but in + * AArch32 there is no UXN (XN affects both privilege levels). + * For consistency, we set them simultaneously in both cases. + * - RO and RW permissions must be the same in EL1 and EL0. If + * EL0 can access that memory region, so can EL1, with the + * same permissions. + */ +#if ENABLE_ASSERTIONS + uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME); + uint64_t xn_perm = desc & xn_mask; + + assert((xn_perm == xn_mask) || (xn_perm == 0ULL)); +#endif + printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW"); + /* Only check one of PXN and UXN, the other one is the same. */ + printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC"); + /* + * Privileged regions can only be accessed from EL1, user + * regions can be accessed from EL1 and EL0. + */ + printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL) + ? "-USER" : "-PRIV"); + } + +#if ENABLE_RME + switch (desc & LOWER_ATTRS(EL3_S1_NSE | NS)) { + case 0ULL: + printf("-S"); + break; + case LOWER_ATTRS(NS): + printf("-NS"); + break; + case LOWER_ATTRS(EL3_S1_NSE): + printf("-RT"); + break; + default: /* LOWER_ATTRS(EL3_S1_NSE | NS) */ + printf("-RL"); + } +#else + printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S"); +#endif + +#ifdef __aarch64__ + /* Check Guarded Page bit */ + if ((desc & GP) != 0ULL) { + printf("-GP"); + } +#endif +} + +static const char * const level_spacers[] = { + "[LV0] ", + " [LV1] ", + " [LV2] ", + " [LV3] " +}; + +static const char *invalid_descriptors_ommited = + "%s(%d invalid descriptors omitted)\n"; + +/* + * Recursive function that reads the translation tables passed as an argument + * and prints their status. 
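+ *
+ * As an illustration, a 2 MiB block identity-mapping secure, read-write,
+ * non-executable memory at address 0 in the EL3 regime is reported with a
+ * line of the form
+ * "  [LV2]  VA:0x0 PA:0x0 size:0x200000 MEM-RW-XN-S".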
+ */ +static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va, + const uint64_t *table_base, unsigned int table_entries, + unsigned int level) +{ + assert(level <= XLAT_TABLE_LEVEL_MAX); + + uint64_t desc; + uintptr_t table_idx_va = table_base_va; + unsigned int table_idx = 0U; + size_t level_size = XLAT_BLOCK_SIZE(level); + + /* + * Keep track of how many invalid descriptors are counted in a row. + * Whenever multiple invalid descriptors are found, only the first one + * is printed, and a line is added to inform about how many descriptors + * have been omitted. + */ + int invalid_row_count = 0; + + while (table_idx < table_entries) { + + desc = table_base[table_idx]; + + if ((desc & DESC_MASK) == INVALID_DESC) { + + if (invalid_row_count == 0) { + printf("%sVA:0x%lx size:0x%zx\n", + level_spacers[level], + table_idx_va, level_size); + } + invalid_row_count++; + + } else { + + if (invalid_row_count > 1) { + printf(invalid_descriptors_ommited, + level_spacers[level], + invalid_row_count - 1); + } + invalid_row_count = 0; + + /* + * Check if this is a table or a block. Tables are only + * allowed in levels other than 3, but DESC_PAGE has the + * same value as DESC_TABLE, so we need to check. + */ + if (((desc & DESC_MASK) == TABLE_DESC) && + (level < XLAT_TABLE_LEVEL_MAX)) { + /* + * Do not print any PA for a table descriptor, + * as it doesn't directly map physical memory + * but instead points to the next translation + * table in the translation table walk. + */ + printf("%sVA:0x%lx size:0x%zx\n", + level_spacers[level], + table_idx_va, level_size); + + uintptr_t addr_inner = desc & TABLE_ADDR_MASK; + + xlat_tables_print_internal(ctx, table_idx_va, + (uint64_t *)addr_inner, + XLAT_TABLE_ENTRIES, level + 1U); + } else { + printf("%sVA:0x%lx PA:0x%" PRIx64 " size:0x%zx ", + level_spacers[level], table_idx_va, + (uint64_t)(desc & TABLE_ADDR_MASK), + level_size); + xlat_desc_print(ctx, desc); + printf("\n"); + } + } + + table_idx++; + table_idx_va += level_size; + } + + if (invalid_row_count > 1) { + printf(invalid_descriptors_ommited, + level_spacers[level], invalid_row_count - 1); + } +} + +void xlat_tables_print(xlat_ctx_t *ctx) +{ + const char *xlat_regime_str; + int used_page_tables; + + if (ctx->xlat_regime == EL1_EL0_REGIME) { + xlat_regime_str = "1&0"; + } else if (ctx->xlat_regime == EL2_REGIME) { + xlat_regime_str = "2"; + } else { + assert(ctx->xlat_regime == EL3_REGIME); + xlat_regime_str = "3"; + } + VERBOSE("Translation tables state:\n"); + VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str); + VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address); + VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address); + VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa); + VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va); + + VERBOSE(" Initial lookup level: %u\n", ctx->base_level); + VERBOSE(" Entries @initial lookup level: %u\n", + ctx->base_table_entries); + +#if PLAT_XLAT_TABLES_DYNAMIC + used_page_tables = 0; + for (int i = 0; i < ctx->tables_num; ++i) { + if (ctx->tables_mapped_regions[i] != 0) + ++used_page_tables; + } +#else + used_page_tables = ctx->next_table; +#endif + VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n", + used_page_tables, ctx->tables_num, + ctx->tables_num - used_page_tables); + + xlat_tables_print_internal(ctx, 0U, ctx->base_table, + ctx->base_table_entries, ctx->base_level); +} + +#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */ + +/* + * Do a translation table walk to find the block or page descriptor that maps + * virtual_addr. 
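+ * The walk is performed entirely in software by reading the tables from
+ * memory; it mirrors the lookup the MMU would perform, starting at the
+ * initial lookup level derived from the virtual address space size.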
+ * + * On success, return the address of the descriptor within the translation + * table. Its lookup level is stored in '*out_level'. + * On error, return NULL. + * + * xlat_table_base + * Base address for the initial lookup level. + * xlat_table_base_entries + * Number of entries in the translation table for the initial lookup level. + * virt_addr_space_size + * Size in bytes of the virtual address space. + */ +static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr, + void *xlat_table_base, + unsigned int xlat_table_base_entries, + unsigned long long virt_addr_space_size, + unsigned int *out_level) +{ + unsigned int start_level; + uint64_t *table; + unsigned int entries; + + start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size); + + table = xlat_table_base; + entries = xlat_table_base_entries; + + for (unsigned int level = start_level; + level <= XLAT_TABLE_LEVEL_MAX; + ++level) { + uint64_t idx, desc, desc_type; + + idx = XLAT_TABLE_IDX(virtual_addr, level); + if (idx >= entries) { + WARN("Missing xlat table entry at address 0x%lx\n", + virtual_addr); + return NULL; + } + + desc = table[idx]; + desc_type = desc & DESC_MASK; + + if (desc_type == INVALID_DESC) { + VERBOSE("Invalid entry (memory not mapped)\n"); + return NULL; + } + + if (level == XLAT_TABLE_LEVEL_MAX) { + /* + * Only page descriptors allowed at the final lookup + * level. + */ + assert(desc_type == PAGE_DESC); + *out_level = level; + return &table[idx]; + } + + if (desc_type == BLOCK_DESC) { + *out_level = level; + return &table[idx]; + } + + assert(desc_type == TABLE_DESC); + table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK); + entries = XLAT_TABLE_ENTRIES; + } + + /* + * This shouldn't be reached, the translation table walk should end at + * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop. + */ + assert(false); + + return NULL; +} + + +static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx, + uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry, + unsigned long long *addr_pa, unsigned int *table_level) +{ + uint64_t *entry; + uint64_t desc; + unsigned int level; + unsigned long long virt_addr_space_size; + + /* + * Sanity-check arguments. 
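+	 * The walk below is only meaningful for an initialized context and
+	 * for a translation regime that this library manages.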
+ */ + assert(ctx != NULL); + assert(ctx->initialized); + assert((ctx->xlat_regime == EL1_EL0_REGIME) || + (ctx->xlat_regime == EL2_REGIME) || + (ctx->xlat_regime == EL3_REGIME)); + + virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL; + assert(virt_addr_space_size > 0U); + + entry = find_xlat_table_entry(base_va, + ctx->base_table, + ctx->base_table_entries, + virt_addr_space_size, + &level); + if (entry == NULL) { + WARN("Address 0x%lx is not mapped.\n", base_va); + return -EINVAL; + } + + if (addr_pa != NULL) { + *addr_pa = *entry & TABLE_ADDR_MASK; + } + + if (table_entry != NULL) { + *table_entry = entry; + } + + if (table_level != NULL) { + *table_level = level; + } + + desc = *entry; + +#if LOG_LEVEL >= LOG_LEVEL_VERBOSE + VERBOSE("Attributes: "); + xlat_desc_print(ctx, desc); + printf("\n"); +#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */ + + assert(attributes != NULL); + *attributes = 0U; + + uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK; + + if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) { + *attributes |= MT_MEMORY; + } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) { + *attributes |= MT_NON_CACHEABLE; + } else { + assert(attr_index == ATTR_DEVICE_INDEX); + *attributes |= MT_DEVICE; + } + + uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U; + + if (ap2_bit == AP2_RW) + *attributes |= MT_RW; + + if (ctx->xlat_regime == EL1_EL0_REGIME) { + uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U; + + if (ap1_bit == AP1_ACCESS_UNPRIVILEGED) + *attributes |= MT_USER; + } + + uint64_t ns_bit = (desc >> NS_SHIFT) & 1U; + + if (ns_bit == 1U) + *attributes |= MT_NS; + + uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime); + + if ((desc & xn_mask) == xn_mask) { + *attributes |= MT_EXECUTE_NEVER; + } else { + assert((desc & xn_mask) == 0U); + } + + return 0; +} + + +int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va, + uint32_t *attr) +{ + return xlat_get_mem_attributes_internal(ctx, base_va, attr, + NULL, NULL, NULL); +} + + +int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va, + size_t size, uint32_t attr) +{ + /* Note: This implementation isn't optimized. */ + + assert(ctx != NULL); + assert(ctx->initialized); + + unsigned long long virt_addr_space_size = + (unsigned long long)ctx->va_max_address + 1U; + assert(virt_addr_space_size > 0U); + + if (!IS_PAGE_ALIGNED(base_va)) { + WARN("%s: Address 0x%lx is not aligned on a page boundary.\n", + __func__, base_va); + return -EINVAL; + } + + if (size == 0U) { + WARN("%s: Size is 0.\n", __func__); + return -EINVAL; + } + + if ((size % PAGE_SIZE) != 0U) { + WARN("%s: Size 0x%zx is not a multiple of a page size.\n", + __func__, size); + return -EINVAL; + } + + if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) { + WARN("%s: Mapping memory as read-write and executable not allowed.\n", + __func__); + return -EINVAL; + } + + size_t pages_count = size / PAGE_SIZE; + + VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n", + pages_count, base_va); + + uintptr_t base_va_original = base_va; + + /* + * Sanity checks. 
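+	 *
+	 * The whole range is validated in this first pass, before any
+	 * descriptor is modified in the second pass further down, so that the
+	 * operation either applies to every requested page or fails without
+	 * touching the translation tables.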
+ */ + for (unsigned int i = 0U; i < pages_count; ++i) { + const uint64_t *entry; + uint64_t desc, attr_index; + unsigned int level; + + entry = find_xlat_table_entry(base_va, + ctx->base_table, + ctx->base_table_entries, + virt_addr_space_size, + &level); + if (entry == NULL) { + WARN("Address 0x%lx is not mapped.\n", base_va); + return -EINVAL; + } + + desc = *entry; + + /* + * Check that all the required pages are mapped at page + * granularity. + */ + if (((desc & DESC_MASK) != PAGE_DESC) || + (level != XLAT_TABLE_LEVEL_MAX)) { + WARN("Address 0x%lx is not mapped at the right granularity.\n", + base_va); + WARN("Granularity is 0x%lx, should be 0x%lx.\n", + XLAT_BLOCK_SIZE(level), PAGE_SIZE); + return -EINVAL; + } + + /* + * If the region type is device, it shouldn't be executable. + */ + attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK; + if (attr_index == ATTR_DEVICE_INDEX) { + if ((attr & MT_EXECUTE_NEVER) == 0U) { + WARN("Setting device memory as executable at address 0x%lx.", + base_va); + return -EINVAL; + } + } + + base_va += PAGE_SIZE; + } + + /* Restore original value. */ + base_va = base_va_original; + + for (unsigned int i = 0U; i < pages_count; ++i) { + + uint32_t old_attr = 0U, new_attr; + uint64_t *entry = NULL; + unsigned int level = 0U; + unsigned long long addr_pa = 0ULL; + + (void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr, + &entry, &addr_pa, &level); + + /* + * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and + * MT_USER/MT_PRIVILEGED are taken into account. Any other + * information is ignored. + */ + + /* Clean the old attributes so that they can be rebuilt. */ + new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER); + + /* + * Update attributes, but filter out the ones this function + * isn't allowed to change. + */ + new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER); + + /* + * The break-before-make sequence requires writing an invalid + * descriptor and making sure that the system sees the change + * before writing the new descriptor. + */ + *entry = INVALID_DESC; +#if !HW_ASSISTED_COHERENCY + dccvac((uintptr_t)entry); +#endif + /* Invalidate any cached copy of this mapping in the TLBs. */ + xlat_arch_tlbi_va(base_va, ctx->xlat_regime); + + /* Ensure completion of the invalidation. */ + xlat_arch_tlbi_va_sync(); + + /* Write new descriptor */ + *entry = xlat_desc(ctx, new_attr, addr_pa, level); +#if !HW_ASSISTED_COHERENCY + dccvac((uintptr_t)entry); +#endif + base_va += PAGE_SIZE; + } + + /* Ensure that the last descriptor writen is seen by the system. */ + dsbish(); + + return 0; +} |
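For reference, the sketch below shows how a BL image might exercise the
dynamic-mapping and attribute-change interfaces added by this patch. It is a
minimal illustration, not part of the patch: it assumes a platform built with
PLAT_XLAT_TABLES_DYNAMIC=1 and uses the active-context wrappers declared in
xlat_tables_v2.h, which call into the *_ctx functions shown above; the device
window address is purely hypothetical.

#include <assert.h>

#include <lib/xlat_tables/xlat_tables_v2.h>

/* Hypothetical device window, for illustration only. */
#define DEMO_DEV_BASE	0x90000000ULL
#define DEMO_DEV_SIZE	PAGE_SIZE

static void demo_dynamic_mapping(void)
{
	int ret;

	/* Identity-map one page of device memory at runtime. */
	ret = mmap_add_dynamic_region(DEMO_DEV_BASE, (uintptr_t)DEMO_DEV_BASE,
				      DEMO_DEV_SIZE,
				      MT_DEVICE | MT_RW | MT_SECURE);
	assert(ret == 0);

	/* ... use the device ... */

	/* Make the page read-only; device memory must stay non-executable. */
	ret = xlat_change_mem_attributes((uintptr_t)DEMO_DEV_BASE,
					 DEMO_DEV_SIZE,
					 MT_RO | MT_EXECUTE_NEVER);
	assert(ret == 0);

	/* Dynamic regions, unlike static ones, can be removed again. */
	ret = mmap_remove_dynamic_region((uintptr_t)DEMO_DEV_BASE,
					 DEMO_DEV_SIZE);
	assert(ret == 0);
}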