Diffstat (limited to 'lib/xlat_tables_v2/aarch64')
-rw-r--r-- | lib/xlat_tables_v2/aarch64/enable_mmu.S       |  97
-rw-r--r-- | lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 324
2 files changed, 421 insertions, 0 deletions
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
new file mode 100644
index 0000000..9f075e4
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+        .global enable_mmu_direct_el1
+        .global enable_mmu_direct_el2
+        .global enable_mmu_direct_el3
+
+        /* Macros to read and write to system register for a given EL. */
+        .macro _msr reg_name, el, gp_reg
+        msr     \reg_name\()_el\()\el, \gp_reg
+        .endm
+
+        .macro _mrs gp_reg, reg_name, el
+        mrs     \gp_reg, \reg_name\()_el\()\el
+        .endm
+
+        .macro tlbi_invalidate_all el
+        .if \el == 1
+                TLB_INVALIDATE(vmalle1)
+        .elseif \el == 2
+                TLB_INVALIDATE(alle2)
+        .elseif \el == 3
+                TLB_INVALIDATE(alle3)
+        .else
+                .error "EL must be 1, 2 or 3"
+        .endif
+        .endm
+
+        /* void enable_mmu_direct_el<x>(unsigned int flags) */
+        .macro define_mmu_enable_func el
+        func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+                _mrs    x1, sctlr, \el
+                tst     x1, #SCTLR_M_BIT
+                ASM_ASSERT(eq)
+#endif
+                /* Invalidate all TLB entries */
+                tlbi_invalidate_all \el
+
+                mov     x7, x0
+                adrp    x0, mmu_cfg_params
+                add     x0, x0, :lo12:mmu_cfg_params
+
+                /* MAIR */
+                ldr     x1, [x0, #(MMU_CFG_MAIR << 3)]
+                _msr    mair, \el, x1
+
+                /* TCR */
+                ldr     x2, [x0, #(MMU_CFG_TCR << 3)]
+                _msr    tcr, \el, x2
+
+                /* TTBR */
+                ldr     x3, [x0, #(MMU_CFG_TTBR0 << 3)]
+                _msr    ttbr0, \el, x3
+
+                /*
+                 * Ensure all translation table writes have drained into
+                 * memory, the TLB invalidation is complete, and translation
+                 * register writes are committed before enabling the MMU.
+                 */
+                dsb     ish
+                isb
+
+                /* Set and clear required fields of SCTLR */
+                _mrs    x4, sctlr, \el
+                mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+                orr     x4, x4, x5
+
+                /* Additionally, amend SCTLR fields based on flags */
+                bic     x5, x4, #SCTLR_C_BIT
+                tst     x7, #DISABLE_DCACHE
+                csel    x4, x5, x4, ne
+
+                _msr    sctlr, \el, x4
+                isb
+
+                ret
+        endfunc enable_mmu_direct_\()el\el
+        .endm
+
+        /*
+         * Define MMU-enabling functions for EL1, EL2 and EL3:
+         *
+         *  enable_mmu_direct_el1
+         *  enable_mmu_direct_el2
+         *  enable_mmu_direct_el3
+         */
+        define_mmu_enable_func 1
+        define_mmu_enable_func 2
+        define_mmu_enable_func 3
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
new file mode 100644
index 0000000..719110a
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/cassert.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include "../xlat_tables_private.h"
+
+/*
+ * Returns true if the provided granule size is supported, false otherwise.
+ */
+bool xlat_arch_is_granule_size_supported(size_t size)
+{
+        u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
+
+        if (size == PAGE_SIZE_4KB) {
+                return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+                        ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
+                        ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
+        } else if (size == PAGE_SIZE_16KB) {
+                return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+                        ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
+                        ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
+        } else if (size == PAGE_SIZE_64KB) {
+                return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+                        ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
+                        ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+        } else {
+                return false;
+        }
+}
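
Aside (not part of the patch): the TGranN checks above are easier to follow
with a concrete value. Below is a minimal host-side C sketch of the same
decode. The shifts, the 4-bit field mask and the "supported" encodings
(TGran4 and TGran64 read 0x0 when supported, TGran16 reads 0x1) are taken
from the Arm ARM's ID_AA64MMFR0_EL1 layout rather than from TF-A's headers,
and the sample register value is invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TGRAN4_SHIFT    28U     /* ID_AA64MMFR0_EL1.TGran4,  0x0 = supported */
#define TGRAN64_SHIFT   24U     /* ID_AA64MMFR0_EL1.TGran64, 0x0 = supported */
#define TGRAN16_SHIFT   20U     /* ID_AA64MMFR0_EL1.TGran16, 0x1 = supported */
#define TGRAN_MASK      0xfULL

static bool granule_supported(uint64_t mmfr0, unsigned int shift,
                              uint64_t supported_val)
{
        /* Extract the 4-bit field and compare against its "supported" code. */
        return ((mmfr0 >> shift) & TGRAN_MASK) == supported_val;
}

int main(void)
{
        /* Invented value: 4KB and 16KB granules supported, 64KB not. */
        uint64_t mmfr0 = 0x0f100022ULL;

        printf("4KB:  %d\n", granule_supported(mmfr0, TGRAN4_SHIFT, 0x0ULL));
        printf("16KB: %d\n", granule_supported(mmfr0, TGRAN16_SHIFT, 0x1ULL));
        printf("64KB: %d\n", granule_supported(mmfr0, TGRAN64_SHIFT, 0x0ULL));
        return 0;
}
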
+size_t xlat_arch_get_max_supported_granule_size(void)
+{
+        if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
+                return PAGE_SIZE_64KB;
+        } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
+                return PAGE_SIZE_16KB;
+        } else {
+                assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
+                return PAGE_SIZE_4KB;
+        }
+}
+
+/*
+ * Determine the physical address space encoded in the 'attr' parameter.
+ *
+ * The physical address will fall into one of four spaces (secure, nonsecure,
+ * root or realm) if RME is enabled, or into one of two spaces (secure and
+ * nonsecure) otherwise.
+ */
+uint32_t xlat_arch_get_pas(uint32_t attr)
+{
+        uint32_t pas = MT_PAS(attr);
+
+        switch (pas) {
+#if ENABLE_RME
+        /* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */
+        case MT_REALM:
+                return LOWER_ATTRS(EL3_S1_NSE | NS);
+        /* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */
+        case MT_ROOT:
+                return LOWER_ATTRS(EL3_S1_NSE);
+#endif
+        case MT_NS:
+                return LOWER_ATTRS(NS);
+        default: /* MT_SECURE */
+                return 0U;
+        }
+}
+
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
+{
+        /* Physical address can't exceed 48 bits */
+        assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
+
+        /* 48 bits address */
+        if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
+                return TCR_PS_BITS_256TB;
+
+        /* 44 bits address */
+        if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
+                return TCR_PS_BITS_16TB;
+
+        /* 42 bits address */
+        if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
+                return TCR_PS_BITS_4TB;
+
+        /* 40 bits address */
+        if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
+                return TCR_PS_BITS_1TB;
+
+        /* 36 bits address */
+        if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
+                return TCR_PS_BITS_64GB;
+
+        return TCR_PS_BITS_4GB;
+}
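
Aside (not part of the patch): tcr_physical_addr_size_bits() above is the
usual "smallest PA range that covers max_addr" cascade. The same mapping can
be written as a table walk, sketched below; the PS encodings (0 = 32-bit PA
up to 5 = 48-bit PA) are the architectural values behind the TCR_PS_BITS_*
macros, and the function name is a hypothetical stand-in.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t tcr_ps_bits_sketch(uint64_t max_addr)
{
        /* Lowest bit of each address band, widest band first. */
        static const struct {
                unsigned int low_bit;
                uint64_t ps;
        } map[] = {
                { 44U, 5U },    /* bits [47:44] set -> 48-bit PA (256TB) */
                { 42U, 4U },    /* bits [43:42] set -> 44-bit PA (16TB)  */
                { 40U, 3U },    /* bits [41:40] set -> 42-bit PA (4TB)   */
                { 36U, 2U },    /* bits [39:36] set -> 40-bit PA (1TB)   */
                { 32U, 1U },    /* bits [35:32] set -> 36-bit PA (64GB)  */
        };

        for (size_t i = 0U; i < sizeof(map) / sizeof(map[0]); i++) {
                if ((max_addr >> map[i].low_bit) != 0U)
                        return map[i].ps;
        }
        return 0U;              /* fits in 32 bits -> 4GB */
}

int main(void)
{
        /* 32-bit max address -> PS 0; 40-bit max address -> PS 2. */
        printf("PS for 4GB - 1: %llu\n",
               (unsigned long long)tcr_ps_bits_sketch(0xFFFFFFFFULL));
        printf("PS for 1TB - 1: %llu\n",
               (unsigned long long)tcr_ps_bits_sketch((1ULL << 40) - 1ULL));
        return 0;
}
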
+#if ENABLE_ASSERTIONS
+/*
+ * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
+ * is supported in ARMv8.2 onwards.
+ */
+static const unsigned int pa_range_bits_arr[] = {
+        PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+        PARANGE_0101, PARANGE_0110
+};
+
+unsigned long long xlat_arch_get_max_supported_pa(void)
+{
+        u_register_t pa_range = read_id_aa64mmfr0_el1() &
+                                ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+        /* All other values are reserved */
+        assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+        return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+
+/*
+ * Return minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void)
+{
+        uintptr_t ret;
+
+        if (is_armv8_4_ttst_present())
+                ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
+        else
+                ret = MIN_VIRT_ADDR_SPACE_SIZE;
+
+        return ret;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+        if (ctx->xlat_regime == EL1_EL0_REGIME) {
+                assert(xlat_arch_current_el() >= 1U);
+                return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+        } else if (ctx->xlat_regime == EL2_REGIME) {
+                assert(xlat_arch_current_el() >= 2U);
+                return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
+        } else {
+                assert(ctx->xlat_regime == EL3_REGIME);
+                assert(xlat_arch_current_el() >= 3U);
+                return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
+        }
+}
+
+bool is_dcache_enabled(void)
+{
+        unsigned int el = get_current_el_maybe_constant();
+
+        if (el == 1U) {
+                return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+        } else if (el == 2U) {
+                return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+        } else {
+                return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+        }
+}
+
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
+{
+        if (xlat_regime == EL1_EL0_REGIME) {
+                return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+        } else {
+                assert((xlat_regime == EL2_REGIME) ||
+                       (xlat_regime == EL3_REGIME));
+                return UPPER_ATTRS(XN);
+        }
+}
+
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
+{
+        /*
+         * Ensure the translation table write has drained into memory before
+         * invalidating the TLB entry.
+         */
+        dsbishst();
+
+        /*
+         * This function only supports invalidation of TLB entries for the
+         * EL1&0, EL2 and EL3 translation regimes.
+         *
+         * Also, it is architecturally UNDEFINED to invalidate TLBs of a
+         * higher exception level (see section D4.9.2 of the ARM ARM rev B.a).
+         */
+        if (xlat_regime == EL1_EL0_REGIME) {
+                assert(xlat_arch_current_el() >= 1U);
+                tlbivaae1is(TLBI_ADDR(va));
+        } else if (xlat_regime == EL2_REGIME) {
+                assert(xlat_arch_current_el() >= 2U);
+                tlbivae2is(TLBI_ADDR(va));
+        } else {
+                assert(xlat_regime == EL3_REGIME);
+                assert(xlat_arch_current_el() >= 3U);
+                tlbivae3is(TLBI_ADDR(va));
+        }
+}
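
Aside (not part of the patch): a sketch of the calling pattern the xlat
framework follows when it edits a live translation table entry. The function
and parameter names are hypothetical stand-ins; xlat_arch_tlbi_va() above and
xlat_arch_tlbi_va_sync() just below are the functions defined in this file,
as they would be used from within it.

static void remap_page_sketch(uint64_t *desc, uint64_t new_desc,
                              uintptr_t va, int xlat_regime)
{
        /* 1. Update the descriptor in the translation table. */
        *desc = new_desc;

        /* 2. Drain the write (DSB), then broadcast-invalidate the stale
         *    TLB entry for this VA in the given regime. */
        xlat_arch_tlbi_va(va, xlat_regime);

        /* 3. Wait for the TLBI to complete (DSB) and resynchronise the
         *    PE's context (ISB). */
        xlat_arch_tlbi_va_sync();
}
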
+void xlat_arch_tlbi_va_sync(void)
+{
+        /*
+         * A TLB maintenance instruction can complete at any time after
+         * it is issued, but is only guaranteed to be complete after the
+         * execution of DSB by the PE that executed the TLB maintenance
+         * instruction. After the TLB invalidate instruction is
+         * complete, no new memory accesses using the invalidated TLB
+         * entries will be observed by any observer of the system
+         * domain. See section D4.8.2 of the ARMv8 ARM (issue k), paragraph
+         * "Ordering and completion of TLB maintenance instructions".
+         */
+        dsbish();
+
+        /*
+         * The effects of a completed TLB maintenance instruction are
+         * only guaranteed to be visible on the PE that executed the
+         * instruction after the execution of an ISB instruction by the
+         * PE that executed the TLB maintenance instruction.
+         */
+        isb();
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+        unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+        assert(el > 0U);
+
+        return el;
+}
+
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+                   const uint64_t *base_table, unsigned long long max_pa,
+                   uintptr_t max_va, int xlat_regime)
+{
+        uint64_t mair, ttbr0, tcr;
+        uintptr_t virtual_addr_space_size;
+
+        /* Set attributes in the right indices of the MAIR. */
+        mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+        mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+        mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+        /*
+         * Limit the input address ranges and memory region sizes translated
+         * using TTBR0 to the given virtual address space size.
+         */
+        assert(max_va < ((uint64_t)UINTPTR_MAX));
+
+        virtual_addr_space_size = (uintptr_t)max_va + 1U;
+
+        assert(virtual_addr_space_size >=
+               xlat_get_min_virt_addr_space_size());
+        assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
+        assert(IS_POWER_OF_TWO(virtual_addr_space_size));
+
+        /*
+         * __builtin_ctzll(0) is undefined but here we are guaranteed that
+         * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
+         */
+        int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+        tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;
+
+        /*
+         * Set the cacheability and shareability attributes for memory
+         * associated with translation table walks.
+         */
+        if ((flags & XLAT_TABLE_NC) != 0U) {
+                /* Inner & outer non-cacheable non-shareable. */
+                tcr |= TCR_SH_NON_SHAREABLE |
+                       TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
+        } else {
+                /* Inner & outer WBWA & shareable. */
+                tcr |= TCR_SH_INNER_SHAREABLE |
+                       TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
+        }
+
+        /*
+         * It is safer to restrict the max physical address accessible by the
+         * hardware as much as possible.
+         */
+        unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
+
+        if (xlat_regime == EL1_EL0_REGIME) {
+                /*
+                 * TCR_EL1.EPD1: Disable translation table walk for addresses
+                 * that are translated using TTBR1_EL1.
+                 */
+                tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+        } else if (xlat_regime == EL2_REGIME) {
+                tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
+        } else {
+                assert(xlat_regime == EL3_REGIME);
+                tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+        }
+
+        /* Set TTBR bits as well */
+        ttbr0 = (uint64_t) base_table;
+
+        if (is_armv8_2_ttcnp_present()) {
+                /* Enable CnP bit so as to share page tables with all PEs. */
+                ttbr0 |= TTBR_CNP_BIT;
+        }
+
+        params[MMU_CFG_MAIR] = mair;
+        params[MMU_CFG_TCR] = tcr;
+        params[MMU_CFG_TTBR0] = ttbr0;
+}
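
Aside (not part of the patch): how the two new files meet at run time. A
caller, sketched here with an invented wrapper name, fills the global
mmu_cfg_params[] array via setup_mmu_cfg() and then calls the assembly
routine, which loads the MMU_CFG_MAIR/TCR/TTBR0 slots back by index before
flipping SCTLR. The externs mirror symbols from this patch and from the
non-arch xlat code; treat the exact declarations as assumptions.

extern uint64_t mmu_cfg_params[];       /* defined in the common xlat code */
extern void enable_mmu_direct_el3(unsigned int flags);

static void enable_mmu_el3_sketch(const uint64_t *base_table,
                                  unsigned long long max_pa,
                                  uintptr_t max_va, unsigned int flags)
{
        /* Compute MAIR/TCR/TTBR0 and stash them for the assembly side. */
        setup_mmu_cfg(mmu_cfg_params, flags, base_table, max_pa, max_va,
                      EL3_REGIME);

        /* Reads mmu_cfg_params, programs the registers, sets SCTLR.M/C. */
        enable_mmu_direct_el3(flags);
}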