Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--  arch/arm64/kvm/hyp/aarch32.c                |   4
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h     | 130
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h  |  24
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c          |  12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/host.S              |   2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mm.c                |   4
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c                |  36
-rw-r--r--  arch/arm64/kvm/hyp/vhe/sysreg-sr.c          |   2
8 files changed, 128 insertions(+), 86 deletions(-)
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index f98cbe2626..8d9670e661 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -84,7 +84,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
}
/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
@@ -120,7 +120,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
}
/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
void kvm_skip_instr32(struct kvm_vcpu *vcpu)
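
Note: the two hunks above only realign the kerneldoc titles with the functions they document (adjust_itstate had become kvm_adjust_itstate, and the skip helper kvm_skip_instr32). For orientation, the skip helper is, in essence, the following (paraphrased from the function these comments document, not guaranteed verbatim):

void kvm_skip_instr32(struct kvm_vcpu *vcpu)
{
	u32 pc = *vcpu_pc(vcpu);
	bool is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);

	/* A trapped 16-bit Thumb instruction advances PC by 2,
	 * anything else by 4 ... */
	if (is_thumb && !kvm_vcpu_trap_il_is32bit(vcpu))
		pc += 2;
	else
		pc += 4;

	*vcpu_pc(vcpu) = pc;

	/* ... and the IT-block state must be advanced past the slot
	 * the emulated instruction consumed. */
	kvm_adjust_itstate(vcpu);
}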
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a038320cdb..e3fcf8c4d5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
clr |= ~hfg & __ ## reg ## _nMASK; \
} while(0)
-#define update_fgt_traps_cs(vcpu, reg, clr, set) \
+#define reg_to_fgt_group_id(reg) \
+ ({ \
+ enum fgt_group_id id; \
+ switch(reg) { \
+ case HFGRTR_EL2: \
+ case HFGWTR_EL2: \
+ id = HFGxTR_GROUP; \
+ break; \
+ case HFGITR_EL2: \
+ id = HFGITR_GROUP; \
+ break; \
+ case HDFGRTR_EL2: \
+ case HDFGWTR_EL2: \
+ id = HDFGRTR_GROUP; \
+ break; \
+ case HAFGRTR_EL2: \
+ id = HAFGRTR_GROUP; \
+ break; \
+ default: \
+ BUILD_BUG_ON(1); \
+ } \
+ \
+ id; \
+ })
+
+#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
+ do { \
+ u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
+ set |= hfg & __ ## reg ## _MASK; \
+ clr |= hfg & __ ## reg ## _nMASK; \
+ } while(0)
+
+#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
do { \
- struct kvm_cpu_context *hctxt = \
- &this_cpu_ptr(&kvm_host_data)->host_ctxt; \
u64 c = 0, s = 0; \
\
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
- compute_clr_set(vcpu, reg, c, s); \
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
+ compute_clr_set(vcpu, reg, c, s); \
+ \
+ compute_undef_clr_set(vcpu, kvm, reg, c, s); \
+ \
s |= set; \
c |= clr; \
if (c || s) { \
@@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
} \
} while(0)
-#define update_fgt_traps(vcpu, reg) \
- update_fgt_traps_cs(vcpu, reg, 0, 0)
+#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
+ update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
/*
* Validate the fine grain trap masks.
@@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void)
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
- u64 r_val, w_val;
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
CHECK_FGT_MASKS(HFGRTR_EL2);
CHECK_FGT_MASKS(HFGWTR_EL2);
@@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
- ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
-
- if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
-
- r_clr |= tmp;
- w_clr |= tmp;
- }
-
- /*
- * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
- */
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- w_set |= HFGxTR_EL2_TCR_EL1_MASK;
-
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
- compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
- compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
- }
-
- /* The default to trap everything not handled or supported in KVM. */
- tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
- HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
-
- r_val = __HFGRTR_EL2_nMASK & ~tmp;
- r_val |= r_set;
- r_val &= ~r_clr;
-
- w_val = __HFGWTR_EL2_nMASK & ~tmp;
- w_val |= w_set;
- w_val &= ~w_clr;
-
- write_sysreg_s(r_val, SYS_HFGRTR_EL2);
- write_sysreg_s(w_val, SYS_HFGWTR_EL2);
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return;
-
- update_fgt_traps(vcpu, HFGITR_EL2);
- update_fgt_traps(vcpu, HDFGRTR_EL2);
- update_fgt_traps(vcpu, HDFGWTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
+ cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
+ HFGxTR_EL2_TCR_EL1_MASK : 0);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
- update_fgt_traps(vcpu, HAFGRTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
+#define __deactivate_fgt(hctxt, vcpu, kvm, reg) \
+ do { \
+ if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
+ kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
+ write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
+ SYS_ ## reg); \
+ } while(0)
+
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return;
-
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
+ else
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
- write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
@@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
- u64 hcrx = HCRX_GUEST_FLAGS;
+ u64 hcrx = vcpu->arch.hcrx_el2;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
u64 clr = 0, set = 0;
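
Note: reg_to_fgt_group_id() above leans on a GNU C statement expression plus BUILD_BUG_ON(1). Because `reg` is always a compile-time constant at the call sites, the compiler folds the switch down to one arm; the BUILD_BUG_ON only survives (and fails the build) if an unhandled register is ever passed, so a bad fgu[] index can never reach runtime. A minimal userspace analogue of the idiom, with made-up names, built with gcc or recent clang at -O1 or higher:

#include <stdio.h>

/* Stand-in for the kernel's BUILD_BUG_ON(1): a call to a function
 * carrying the error attribute breaks the build unless dead-code
 * elimination removes it, which is what keeps the default: arm
 * harmless when the argument is a handled constant. */
extern void bad_constant(void) __attribute__((error("unhandled constant")));

enum group { WARM, COLD };

#define color_to_group(c)					\
	({							\
		enum group __g;					\
		switch (c) {					\
		case 0:		/* red */			\
		case 1:		/* orange */			\
			__g = WARM;				\
			break;					\
		case 2:		/* blue */			\
			__g = COLD;				\
			break;					\
		default:					\
			bad_constant();				\
		}						\
		__g;						\
	})

int main(void)
{
	printf("%d\n", color_to_group(2));	/* prints 1 (COLD) */
	return 0;
}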
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index bb6b571ec6..4be6a7fa00 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
-static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
if (!vcpu)
vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+ return vcpu;
+}
+
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}
+static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
- if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
+ if (ctxt_has_s1pie(ctxt)) {
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
}
@@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
- if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
+ if (ctxt_has_s1pie(ctxt)) {
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
}
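
Note: the refactor above factors the context-to-vcpu lookup out of ctxt_has_mte() so the new ctxt_has_s1pie() can share it. The trick being reused: a guest's kvm_cpu_context is embedded inside its kvm_vcpu, so container_of() can walk back from the member to the enclosing structure, while a host context instead carries an explicit __hyp_running_vcpu pointer. A toy illustration of the container_of() half, with simplified stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cpu_context { unsigned long regs[4]; };

struct vcpu {
	int id;
	struct cpu_context ctxt;	/* embedded, as in struct kvm_vcpu */
};

int main(void)
{
	struct vcpu v = { .id = 7 };
	struct cpu_context *ctxt = &v.ctxt;

	/* Recover the enclosing vcpu from a pointer to its context. */
	struct vcpu *owner = container_of(ctxt, struct vcpu, ctxt);

	printf("vcpu id = %d\n", owner->id);	/* prints 7 */
	return 0;
}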
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 4558c02eb3..7746ea507b 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -31,8 +31,8 @@ static void __debug_save_spe(u64 *pmscr_el1)
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
- write_sysreg_s(0, SYS_PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+ write_sysreg_el1(0, SYS_PMSCR);
isb();
/* Now drain all buffered data to memory */
@@ -48,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+ write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
static void __debug_save_trace(u64 *trfcr_el1)
@@ -63,8 +63,8 @@ static void __debug_save_trace(u64 *trfcr_el1)
* Since access to TRFCR_EL1 is trapped, the guest can't
* modify the filtering set by the host.
*/
- *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
- write_sysreg_s(0, SYS_TRFCR_EL1);
+ *trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
+ write_sysreg_el1(0, SYS_TRFCR);
isb();
/* Drain the trace buffer to memory */
tsb_csync();
@@ -76,7 +76,7 @@ static void __debug_restore_trace(u64 trfcr_el1)
return;
/* Restore trace filter controls */
- write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+ write_sysreg_el1(trfcr_el1, SYS_TRFCR);
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
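
Note: the switch from read/write_sysreg_s(SYS_*_EL1) to the read/write_sysreg_el1() accessors matters once this hyp code can run with HCR_EL2.E2H set: plain *_EL1 encodings would then be redirected to the EL2 registers, so the accessor has to patch between the _EL1 and _EL12 encodings. Conceptually it behaves like the sketch below; this is a hypothetical model (including the helper name), as the real macros use the kernel's alternatives framework rather than a runtime branch:

/* Hypothetical sketch of the encoding selection, not the kernel macro: */
#define read_sysreg_el1(r)						\
	(running_with_e2h_set() ?					\
		read_sysreg_s(SYS_ ## r ## _EL12) :	/* E2H=1 */	\
		read_sysreg_s(SYS_ ## r ## _EL1))	/* E2H=0 */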
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 7693a6757c..135cfb294e 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
- /* Prepare and exit to the host's panic funciton. */
+ /* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index b01a3d1078..8850b591d7 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
start = hyp_memory[i].base;
start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
/*
- * The begining of the hyp_vmemmap region for the current
+ * The beginning of the hyp_vmemmap region for the current
* memblock may already be backed by the page backing the end
* of the previous region, so avoid mapping it twice.
*/
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc)
{
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index ce5cef7d73..5a59ef88b6 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -528,7 +528,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
} else {
if (ctx->end - ctx->addr < granule)
return -EINVAL;
@@ -717,15 +717,29 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
kvm_pte_t *ptep)
{
- bool device = prot & KVM_PGTABLE_PROT_DEVICE;
- kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
- KVM_S2_MEMATTR(pgt, NORMAL);
+ kvm_pte_t attr;
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
+ switch (prot & (KVM_PGTABLE_PROT_DEVICE |
+ KVM_PGTABLE_PROT_NORMAL_NC)) {
+ case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
+ return -EINVAL;
+ case KVM_PGTABLE_PROT_DEVICE:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
+ break;
+ case KVM_PGTABLE_PROT_NORMAL_NC:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
+ break;
+ default:
+ attr = KVM_S2_MEMATTR(pgt, NORMAL);
+ }
+
if (!(prot & KVM_PGTABLE_PROT_X))
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
- else if (device)
- return -EINVAL;
if (prot & KVM_PGTABLE_PROT_R)
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
@@ -885,9 +899,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
if (kvm_pte_valid(ctx->old)) {
kvm_clear_pte(ctx->ptep);
- if (!stage2_unmap_defer_tlb_flush(pgt))
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
- ctx->addr, ctx->level);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ TLBI_TTL_UNKNOWN);
+ } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ ctx->level);
+ }
}
mm_ops->put_page(ctx->ptep);
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 8e1e0d5033..a8b9ea4967 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -95,7 +95,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
}
/**
- * __vcpu_put_switch_syregs - Restore host system registers to the physical CPU
+ * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*