Diffstat (limited to 'lib/xlat_mpu/aarch64')
-rw-r--r--  lib/xlat_mpu/aarch64/enable_mpu.S     53
-rw-r--r--  lib/xlat_mpu/aarch64/xlat_mpu_arch.c  69
2 files changed, 122 insertions, 0 deletions
diff --git a/lib/xlat_mpu/aarch64/enable_mpu.S b/lib/xlat_mpu/aarch64/enable_mpu.S
new file mode 100644
index 0000000..0dda979
--- /dev/null
+++ b/lib/xlat_mpu/aarch64/enable_mpu.S
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <platform_def.h>
+
+ .global enable_mpu_direct_el2
+
+ /* void enable_mpu_direct_el2(unsigned int flags) */
+func enable_mpu_direct_el2
+#if ENABLE_ASSERTIONS
+ mrs x1, sctlr_el2
+ tst x1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
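+ /* Save the flags argument in x7; x0 is reused below for the mmu_cfg_params base address */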
+ mov x7, x0
+ adrp x0, mmu_cfg_params
+ add x0, x0, :lo12:mmu_cfg_params
+
+ /* (MAIRs are already set up) */
+
+ /* TCR */
+ ldr x2, [x0, #(MMU_CFG_TCR << 3)]
+ msr tcr_el2, x2
+
+ /*
+ * Ensure all earlier memory writes have drained and the translation
+ * control register write above is committed before enabling the MPU
+ */
+ dsb ish
+ isb
+
+ /* Set and clear required fields of SCTLR */
+ mrs x4, sctlr_el2
+ mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+ orr x4, x4, x5
+
+ /* Additionally, amend SCTLR fields based on flags */
+ bic x5, x4, #SCTLR_C_BIT
+ tst x7, #DISABLE_DCACHE
+ csel x4, x5, x4, ne
+
+ msr sctlr_el2, x4
+ isb
+
+ ret
+endfunc enable_mpu_direct_el2
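
For reference, the bic/tst/csel sequence above updates SCTLR_EL2 roughly as in the following C sketch. This is illustrative only and not part of the patch; it assumes the usual TF-A sysreg accessors (read_sctlr_el2(), write_sctlr_el2(), isb()) and the SCTLR_*_BIT and DISABLE_DCACHE definitions already used by this library, and the helper name is hypothetical.

#include <stdint.h>

#include <arch_helpers.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

/* Hypothetical C equivalent of the SCTLR_EL2 update in enable_mpu_direct_el2 */
static void set_sctlr_el2_for_mpu(unsigned int flags)
{
	uint64_t sctlr = read_sctlr_el2();

	/* Request WXN, data cacheability and MPU enable unconditionally */
	sctlr |= SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT;

	/* If the caller asked for the data cache to stay off, clear the C bit again */
	if ((flags & DISABLE_DCACHE) != 0U) {
		sctlr &= ~SCTLR_C_BIT;
	}

	write_sctlr_el2(sctlr);
	isb();
}
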
diff --git a/lib/xlat_mpu/aarch64/xlat_mpu_arch.c b/lib/xlat_mpu/aarch64/xlat_mpu_arch.c
new file mode 100644
index 0000000..5a2120b
--- /dev/null
+++ b/lib/xlat_mpu/aarch64/xlat_mpu_arch.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "../xlat_mpu_private.h"
+#include <arch.h>
+#include <arch_features.h>
+#include <lib/cassert.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <fvp_r_arch_helpers.h>
+
+#warning "xlat_mpu library is currently experimental and its API may change in the future."
+
+#if ENABLE_ASSERTIONS
+/*
+ * Return minimum virtual address space size supported by the architecture
+ */
+uintptr_t xlat_get_min_virt_addr_space_size(void)
+{
+ uintptr_t ret;
+
+ if (is_armv8_4_ttst_present()) {
+ ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
+ } else {
+ ret = MIN_VIRT_ADDR_SPACE_SIZE;
+ }
+ return ret;
+}
+#endif /* ENABLE_ASSERTIONS */
+
+bool is_mpu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1U);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(xlat_arch_current_el() >= 2U);
+ return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
+ }
+}
+
+bool is_dcache_enabled(void)
+{
+ unsigned int el = get_current_el();
+
+ if (el == 1U) {
+ return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else { /* must be EL2 */
+ return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
+ }
+}
+
+unsigned int xlat_arch_current_el(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ assert(el > 0U);
+
+ return el;
+}
+
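
A minimal usage sketch for the helpers added in xlat_mpu_arch.c, assuming a caller running at EL2 with an xlat context already created and the library headers providing the prototypes; plat_setup_el2_mpu and my_mpu_ctx are hypothetical names, and the MPU region programming step is elided.

#include <assert.h>

#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_mpu_private.h"

extern xlat_ctx_t my_mpu_ctx;			/* hypothetical context set up elsewhere */
extern void enable_mpu_direct_el2(unsigned int flags);	/* asm entry point from enable_mpu.S */

/* Hypothetical platform hook showing the intended call order */
void plat_setup_el2_mpu(unsigned int flags)
{
	/* Regions must be programmed while the MPU is still disabled */
	assert(xlat_arch_current_el() == 2U);
	assert(!is_mpu_enabled_ctx(&my_mpu_ctx));

	/* ... program the EL2 MPU region registers here ... */

	enable_mpu_direct_el2(flags);
	assert(is_mpu_enabled_ctx(&my_mpu_ctx));
}
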