author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /arch/arm64/kvm
parent     Adding upstream version 6.8.12. (diff)
download   linux-b20732900e4636a467c0183a47f7396700f5f743.tar.xz
           linux-b20732900e4636a467c0183a47f7396700f5f743.zip
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/Kconfig                      |   4
-rw-r--r--  arch/arm64/kvm/arch_timer.c                 |   2
-rw-r--r--  arch/arm64/kvm/arm.c                        |  73
-rw-r--r--  arch/arm64/kvm/debug.c                      |   3
-rw-r--r--  arch/arm64/kvm/emulate-nested.c             | 231
-rw-r--r--  arch/arm64/kvm/fpsimd.c                     |   3
-rw-r--r--  arch/arm64/kvm/guest.c                      |  12
-rw-r--r--  arch/arm64/kvm/hyp/aarch32.c                |  22
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h     | 130
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h  |  24
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c          |  12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/host.S              |   2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mm.c                |   4
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c                |  36
-rw-r--r--  arch/arm64/kvm/hyp/vhe/sysreg-sr.c          |   2
-rw-r--r--  arch/arm64/kvm/inject_fault.c               |   2
-rw-r--r--  arch/arm64/kvm/mmu.c                        |  33
-rw-r--r--  arch/arm64/kvm/nested.c                     | 274
-rw-r--r--  arch/arm64/kvm/pmu-emul.c                   |  15
-rw-r--r--  arch/arm64/kvm/sys_regs.c                   | 263
-rw-r--r--  arch/arm64/kvm/sys_regs.h                   |   2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-debug.c            |   2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c             |  12
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c              |  63
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v3.c          |  15
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c               |   3
-rw-r--r--  arch/arm64/kvm/vgic/vgic.c                  |  62
-rw-r--r--  arch/arm64/kvm/vgic/vgic.h                  |  17
28 files changed, 1010 insertions(+), 313 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 27ca89b628..58f09370d1 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -19,7 +19,6 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM
select KVM_COMMON
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
@@ -33,12 +32,11 @@ menuconfig KVM
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
- select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
+ select HAVE_KVM_READONLY_MEM
select HAVE_KVM_VCPU_RUN_PID_CHANGE
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
- select XARRAY_MULTI
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 9dec8c419b..879982b1cc 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
WARN_ON_ONCE(ret);
/*
- * The virtual offset behaviour is "interresting", as it
+ * The virtual offset behaviour is "interesting", as it
* always applies when HCR_EL2.E2H==0, but only when
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
* track E2H when putting the HV timer in "direct" mode.
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a25265aca4..6cda738a41 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -190,6 +190,27 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+void kvm_arch_create_vm_debugfs(struct kvm *kvm)
+{
+ kvm_sys_regs_create_debugfs(kvm);
+}
+
+static void kvm_destroy_mpidr_data(struct kvm *kvm)
+{
+ struct kvm_mpidr_data *data;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ data = rcu_dereference_protected(kvm->arch.mpidr_data,
+ lockdep_is_held(&kvm->arch.config_lock));
+ if (data) {
+ rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
+ synchronize_rcu();
+ kfree(data);
+ }
+
+ mutex_unlock(&kvm->arch.config_lock);
+}
/**
* kvm_arch_destroy_vm - destroy the VM data structure
@@ -205,7 +226,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);
- kfree(kvm->arch.mpidr_data);
+ kvm_destroy_mpidr_data(kvm);
+
+ kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
@@ -390,6 +413,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ /*
+ * This vCPU may have been created after mpidr_data was initialized.
+ * Throw out the pre-computed mappings if that is the case which forces
+ * KVM to fall back to iteratively searching the vCPUs.
+ */
+ kvm_destroy_mpidr_data(vcpu->kvm);
+
err = kvm_vgic_vcpu_init(vcpu);
if (err)
return err;
@@ -589,7 +619,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
mutex_lock(&kvm->arch.config_lock);
- if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+ if (rcu_access_pointer(kvm->arch.mpidr_data) ||
+ atomic_read(&kvm->online_vcpus) == 1)
goto out;
kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -626,7 +657,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
data->cmpidr_to_idx[index] = c;
}
- kvm->arch.mpidr_data = data;
+ rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
mutex_unlock(&kvm->arch.config_lock);
}
@@ -674,6 +705,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret;
}
+ /*
+ * This needs to happen after NV has imposed its own restrictions on
+ * the feature set
+ */
+ kvm_init_sysreg(vcpu);
+
ret = kvm_timer_enable(vcpu);
if (ret)
return ret;
@@ -2459,21 +2496,27 @@ out_err:
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL;
+ struct kvm_mpidr_data *data;
unsigned long i;
mpidr &= MPIDR_HWID_BITMASK;
- if (kvm->arch.mpidr_data) {
- u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+ rcu_read_lock();
+ data = rcu_dereference(kvm->arch.mpidr_data);
+
+ if (data) {
+ u16 idx = kvm_mpidr_index(data, mpidr);
- vcpu = kvm_get_vcpu(kvm,
- kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+ vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
vcpu = NULL;
+ }
+
+ rcu_read_unlock();
+ if (vcpu)
return vcpu;
- }
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
@@ -2586,13 +2629,11 @@ static __init int kvm_arm_init(void)
if (err)
goto out_hyp;
- if (is_protected_kvm_enabled()) {
- kvm_info("Protected nVHE mode initialized successfully\n");
- } else if (in_hyp_mode) {
- kvm_info("VHE mode initialized successfully\n");
- } else {
- kvm_info("Hyp mode initialized successfully\n");
- }
+ kvm_info("%s%sVHE mode initialized successfully\n",
+ in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
+ "Protected " : "Hyp "),
+ in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
+ "h" : "n"));
/*
* FIXME: Do something reasonable if kvm_init() fails after pKVM
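
The arm.c hunks above convert kvm->arch.mpidr_data into an RCU-managed pointer and make kvm_mpidr_to_vcpu() validate its fast-path hit before trusting it. Below is a minimal standalone sketch of that lookup shape — toy types throughout, with RCU and the real kvm_mpidr_index() hash deliberately elided:

#include <stdio.h>
#include <stddef.h>

struct toy_vcpu { unsigned long mpidr; };

static struct toy_vcpu *lookup(struct toy_vcpu *vcpus, int nr,
			       const int *table, unsigned long mpidr)
{
	/* Fast path: the table maps a hash of the MPIDR to a vCPU index. */
	if (table) {
		struct toy_vcpu *v = &vcpus[table[mpidr % nr]];

		/* The hash can collide, so the hit must be validated. */
		if (v->mpidr == mpidr)
			return v;
	}

	/* Slow path: iterate, exactly as kvm_for_each_vcpu() does. */
	for (int i = 0; i < nr; i++)
		if (vcpus[i].mpidr == mpidr)
			return &vcpus[i];

	return NULL;
}

int main(void)
{
	struct toy_vcpu vcpus[] = { { 0x0 }, { 0x1 }, { 0x100 } };
	int table[] = { 0, 1, 2 };
	struct toy_vcpu *v = lookup(vcpus, 3, table, 0x100);

	printf("found mpidr %#lx\n", v ? v->mpidr : 0UL);
	return 0;
}

The validation matters because a stale or colliding table entry then degrades to the slow path rather than returning the wrong vCPU — which is also why kvm_arch_vcpu_create() can simply throw the table away.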
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 8725291cb0..ce8886122e 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -23,7 +23,7 @@
static DEFINE_PER_CPU(u64, mdcr_el2);
-/**
+/*
* save/restore_guest_debug_regs
*
* For some debug operations we need to tweak some guest registers. As
@@ -143,6 +143,7 @@ void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
/**
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ * @vcpu: the vcpu pointer
*/
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 431fd42993..4697ba41b3 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -427,12 +427,14 @@ static const complex_condition_check ccc[] = {
* [19:14] bit number in the FGT register (6 bits)
* [20] trap polarity (1 bit)
* [25:21] FG filter (5 bits)
- * [62:26] Unused (37 bits)
+ * [35:26] Main SysReg table index (10 bits)
+ * [62:36] Unused (27 bits)
* [63] RES0 - Must be zero, as lost on insertion in the xarray
*/
#define TC_CGT_BITS 10
#define TC_FGT_BITS 4
#define TC_FGF_BITS 5
+#define TC_SRI_BITS 10
union trap_config {
u64 val;
@@ -442,7 +444,8 @@ union trap_config {
unsigned long bit:6; /* Bit number */
unsigned long pol:1; /* Polarity */
unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
- unsigned long unused:37; /* Unused, should be zero */
+ unsigned long sri:TC_SRI_BITS; /* SysReg Index */
+ unsigned long unused:27; /* Unused, should be zero */
unsigned long mbz:1; /* Must Be Zero */
};
};
@@ -1006,18 +1009,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
static DEFINE_XARRAY(sr_forward_xa);
-enum fgt_group_id {
- __NO_FGT_GROUP__,
- HFGxTR_GROUP,
- HDFGRTR_GROUP,
- HDFGWTR_GROUP,
- HFGITR_GROUP,
- HAFGRTR_GROUP,
-
- /* Must be last */
- __NR_FGT_GROUP_IDS__
-};
-
enum fg_filter_id {
__NO_FGF__,
HCRX_FGTnXS,
@@ -1757,6 +1748,28 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
err);
}
+static u32 encoding_next(u32 encoding)
+{
+ u8 op0, op1, crn, crm, op2;
+
+ op0 = sys_reg_Op0(encoding);
+ op1 = sys_reg_Op1(encoding);
+ crn = sys_reg_CRn(encoding);
+ crm = sys_reg_CRm(encoding);
+ op2 = sys_reg_Op2(encoding);
+
+ if (op2 < Op2_mask)
+ return sys_reg(op0, op1, crn, crm, op2 + 1);
+ if (crm < CRm_mask)
+ return sys_reg(op0, op1, crn, crm + 1, 0);
+ if (crn < CRn_mask)
+ return sys_reg(op0, op1, crn + 1, 0, 0);
+ if (op1 < Op1_mask)
+ return sys_reg(op0, op1 + 1, 0, 0, 0);
+
+ return sys_reg(op0 + 1, 0, 0, 0, 0);
+}
+
int __init populate_nv_trap_config(void)
{
int ret = 0;
@@ -1775,23 +1788,18 @@ int __init populate_nv_trap_config(void)
ret = -EINVAL;
}
- if (cgt->encoding != cgt->end) {
- prev = xa_store_range(&sr_forward_xa,
- cgt->encoding, cgt->end,
- xa_mk_value(cgt->tc.val),
- GFP_KERNEL);
- } else {
- prev = xa_store(&sr_forward_xa, cgt->encoding,
+ for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) {
+ prev = xa_store(&sr_forward_xa, enc,
xa_mk_value(cgt->tc.val), GFP_KERNEL);
if (prev && !xa_is_err(prev)) {
ret = -EINVAL;
print_nv_trap_error(cgt, "Duplicate CGT", ret);
}
- }
- if (xa_is_err(prev)) {
- ret = xa_err(prev);
- print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ }
}
}
@@ -1804,6 +1812,7 @@ int __init populate_nv_trap_config(void)
for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
union trap_config tc;
+ void *prev;
if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
ret = -EINVAL;
@@ -1818,8 +1827,13 @@ int __init populate_nv_trap_config(void)
}
tc.val |= fgt->tc.val;
- xa_store(&sr_forward_xa, fgt->encoding,
- xa_mk_value(tc.val), GFP_KERNEL);
+ prev = xa_store(&sr_forward_xa, fgt->encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(fgt, "Failed FGT insertion", ret);
+ }
}
kvm_info("nv: %ld fine grained trap handlers\n",
@@ -1845,6 +1859,38 @@ check_mcb:
return ret;
}
+int __init populate_sysreg_config(const struct sys_reg_desc *sr,
+ unsigned int idx)
+{
+ union trap_config tc;
+ u32 encoding;
+ void *ret;
+
+ /*
+ * 0 is a valid value for the index, but not for the storage.
+ * We'll store (idx+1), so check against an offset'd limit.
+ */
+ if (idx >= (BIT(TC_SRI_BITS) - 1)) {
+ kvm_err("sysreg %s (%d) out of range\n", sr->name, idx);
+ return -EINVAL;
+ }
+
+ encoding = sys_reg(sr->Op0, sr->Op1, sr->CRn, sr->CRm, sr->Op2);
+ tc = get_trap_config(encoding);
+
+ if (tc.sri) {
+ kvm_err("sysreg %s (%d) duplicate entry (%d)\n",
+ sr->name, idx - 1, tc.sri);
+ return -EINVAL;
+ }
+
+ tc.sri = idx + 1;
+ ret = xa_store(&sr_forward_xa, encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+
+ return xa_err(ret);
+}
+
static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
const struct trap_bits *tb)
{
@@ -1892,20 +1938,64 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
return __compute_trap_behaviour(vcpu, tc.cgt, b);
}
-static bool check_fgt_bit(u64 val, const union trap_config tc)
+static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
{
- return ((val >> tc.bit) & 1) == tc.pol;
+ struct kvm_sysreg_masks *masks;
+
+ /* Only handle the VNCR-backed regs for now */
+ if (sr < __VNCR_START__)
+ return 0;
+
+ masks = kvm->arch.sysreg_masks;
+
+ return masks->mask[sr - __VNCR_START__].res0;
}
-#define sanitised_sys_reg(vcpu, reg) \
- ({ \
- u64 __val; \
- __val = __vcpu_sys_reg(vcpu, reg); \
- __val &= ~__ ## reg ## _RES0; \
- (__val); \
- })
+static bool check_fgt_bit(struct kvm *kvm, bool is_read,
+ u64 val, const union trap_config tc)
+{
+ enum vcpu_sysreg sr;
+
+ if (tc.pol)
+ return (val & BIT(tc.bit));
+
+ /*
+ * FGTs with negative polarities are an absolute nightmare, as
+ * we need to evaluate the bit in the light of the feature
+ * that defines it. WTF were they thinking?
+ *
+ * So let's check if the bit has been earmarked as RES0, as
+ * this indicates an unimplemented feature.
+ */
+ if (val & BIT(tc.bit))
+ return false;
+
+ switch ((enum fgt_group_id)tc.fgt) {
+ case HFGxTR_GROUP:
+ sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
+ break;
+
+ case HDFGRTR_GROUP:
+ sr = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
+ break;
+
+ case HAFGRTR_GROUP:
+ sr = HAFGRTR_EL2;
+ break;
+
+ case HFGITR_GROUP:
+ sr = HFGITR_EL2;
+ break;
+
+ default:
+ WARN_ONCE(1, "Unhandled FGT group");
+ return false;
+ }
+
+ return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit));
+}
-bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
+bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
{
union trap_config tc;
enum trap_behaviour b;
@@ -1913,9 +2003,6 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
u32 sysreg;
u64 esr, val;
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return false;
-
esr = kvm_vcpu_get_esr(vcpu);
sysreg = esr_sys64_to_sysreg(esr);
is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
@@ -1926,13 +2013,27 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
* A value of 0 for the whole entry means that we know nothing
* for this sysreg, and that it cannot be re-injected into the
* nested hypervisor. In this situation, let's cut it short.
- *
- * Note that ultimately, we could also make use of the xarray
- * to store the index of the sysreg in the local descriptor
- * array, avoiding another search... Hint, hint...
*/
if (!tc.val)
- return false;
+ goto local;
+
+ /*
+ * If a sysreg can be trapped using a FGT, first check whether we
+ * trap for the purpose of forbidding the feature. In that case,
+ * inject an UNDEF.
+ */
+ if (tc.fgt != __NO_FGT_GROUP__ &&
+ (vcpu->kvm->arch.fgu[tc.fgt] & BIT(tc.bit))) {
+ kvm_inject_undefined(vcpu);
+ return true;
+ }
+
+ /*
+ * If we're not nesting, immediately return to the caller, with the
+ * sysreg index, should we have it.
+ */
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ goto local;
switch ((enum fgt_group_id)tc.fgt) {
case __NO_FGT_GROUP__:
@@ -1940,25 +2041,24 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
case HFGxTR_GROUP:
if (is_read)
- val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HFGRTR_EL2);
else
- val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HFGWTR_EL2);
break;
case HDFGRTR_GROUP:
- case HDFGWTR_GROUP:
if (is_read)
- val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2);
else
- val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2);
break;
case HAFGRTR_GROUP:
- val = sanitised_sys_reg(vcpu, HAFGRTR_EL2);
+ val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2);
break;
case HFGITR_GROUP:
- val = sanitised_sys_reg(vcpu, HFGITR_EL2);
+ val = __vcpu_sys_reg(vcpu, HFGITR_EL2);
switch (tc.fgf) {
u64 tmp;
@@ -1966,7 +2066,7 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
break;
case HCRX_FGTnXS:
- tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
+ tmp = __vcpu_sys_reg(vcpu, HCRX_EL2);
if (tmp & HCRX_EL2_FGTnXS)
tc.fgt = __NO_FGT_GROUP__;
}
@@ -1975,10 +2075,11 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
case __NR_FGT_GROUP_IDS__:
/* Something is really wrong, bail out */
WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
- return false;
+ goto local;
}
- if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read,
+ val, tc))
goto inject;
b = compute_trap_behaviour(vcpu, tc);
@@ -1987,6 +2088,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
((b & BEHAVE_FORWARD_WRITE) && !is_read))
goto inject;
+local:
+ if (!tc.sri) {
+ struct sys_reg_params params;
+
+ params = esr_sys64_to_params(esr);
+
+ /*
+ * Check for the IMPDEF range, as per DDI0487 J.a,
+ * D18.3.2 Reserved encodings for IMPLEMENTATION
+ * DEFINED registers.
+ */
+ if (!(params.Op0 == 3 && (params.CRn & 0b1011) == 0b1011))
+ print_sys_reg_msg(&params,
+ "Unsupported guest access at: %lx\n",
+ *vcpu_pc(vcpu));
+ kvm_inject_undefined(vcpu);
+ return true;
+ }
+
+ *sr_index = tc.sri - 1;
return false;
inject:
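
The populate_nv_trap_config() change above replaces the single xa_store_range() with one xa_store() per encoding, stepping via the new encoding_next() helper so duplicate CGT entries are caught individually. Here is a standalone model of that stepping; the Op0/Op1/CRn/CRm/Op2 field layout below mirrors arm64's sysreg encoding and is an assumption of this sketch, not taken from the diff:

#include <stdio.h>
#include <stdint.h>

#define Op0_shift 19
#define Op1_shift 16
#define CRn_shift 12
#define CRm_shift 8
#define Op2_shift 5

#define FIELD(enc, name, mask) (((enc) >> name##_shift) & (mask))

static uint32_t sys_reg(int op0, int op1, int crn, int crm, int op2)
{
	return (op0 << Op0_shift) | (op1 << Op1_shift) |
	       (crn << CRn_shift) | (crm << CRm_shift) | (op2 << Op2_shift);
}

/* Step Op2-first, carrying into CRm, CRn, Op1, then Op0. */
static uint32_t encoding_next(uint32_t enc)
{
	int op0 = FIELD(enc, Op0, 0x3), op1 = FIELD(enc, Op1, 0x7);
	int crn = FIELD(enc, CRn, 0xf), crm = FIELD(enc, CRm, 0xf);
	int op2 = FIELD(enc, Op2, 0x7);

	if (op2 < 0x7) return sys_reg(op0, op1, crn, crm, op2 + 1);
	if (crm < 0xf) return sys_reg(op0, op1, crn, crm + 1, 0);
	if (crn < 0xf) return sys_reg(op0, op1, crn + 1, 0, 0);
	if (op1 < 0x7) return sys_reg(op0, op1 + 1, 0, 0, 0);
	return sys_reg(op0 + 1, 0, 0, 0, 0);
}

int main(void)
{
	/* Count encodings in a small range, as the CGT loop would visit. */
	uint32_t start = sys_reg(3, 0, 0, 0, 6), end = sys_reg(3, 0, 0, 1, 2);
	int n = 0;

	for (uint32_t e = start; e <= end; e = encoding_next(e))
		n++;
	printf("%d encodings visited\n", n); /* op2 6,7 then crm1/op2 0..2 -> 5 */
	return 0;
}

Because a carry always increments a higher-order field, encoding_next() is strictly increasing, which is what lets the kernel loop terminate on a plain `enc <= cgt->end`.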
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 8c1d0d4853..826307e19e 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
}
/*
- * Called just before entering the guest once we are no longer preemptable
+ * Called just before entering the guest once we are no longer preemptible
* and interrupts are disabled. If we have managed to run anything using
* FP while we were preemptible (such as off the back of an interrupt),
* then neither the host nor the guest own the FP hardware (and it was the
@@ -153,6 +153,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fp_state.sve_vl = vcpu->arch.sve_max_vl;
fp_state.sme_state = NULL;
fp_state.svcr = &vcpu->arch.svcr;
+ fp_state.fpmr = &vcpu->arch.fpmr;
fp_state.fp_type = &vcpu->arch.fp_type;
if (vcpu_has_sve(vcpu))
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index aaf1d49397..11098eb7eb 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
+ case PSR_AA32_MODE_SYS:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
int i, nr_reg;
- switch (*vcpu_cpsr(vcpu)) {
+ switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
/*
* Either we are dealing with user mode, and only the
* first 15 registers (+ PC) must be narrowed to 32bit.
@@ -711,6 +712,7 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
+ * @vcpu: the vCPU pointer
*
* This is for all registers.
*/
@@ -729,6 +731,8 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
+ * @vcpu: the vCPU pointer
+ * @uindices: register list to copy
*
* We do core registers right here, then we append system regs.
*/
@@ -902,8 +906,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
/**
* kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
- * @kvm: pointer to the KVM struct
- * @kvm_guest_debug: the ioctl data buffer
+ * @vcpu: the vCPU pointer
+ * @dbg: the ioctl data buffer
*
* This sets up and enables the VM for guest debugging. Userspace
* passes in a control flag to enable different debug types and
@@ -1072,7 +1076,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
} else {
/*
* Only locking to serialise with a concurrent
- * set_pte_at() in the VMM but still overriding the
+ * __set_ptes() in the VMM but still overriding the
* tags, hence ignoring the return value.
*/
try_page_mte_tagging(page);
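
The guest.c fix above switches on *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK rather than the raw CPSR, since PSTATE flag bits would otherwise make every case miss. A tiny demonstration, assuming the architectural AArch32 encodings (mode field in bits [4:0], SVC = 0x13):

#include <stdio.h>
#include <stdint.h>

#define PSR_AA32_MODE_MASK 0x1f
#define PSR_AA32_MODE_USR  0x10
#define PSR_AA32_MODE_SVC  0x13
#define PSR_AA32_N_BIT     (1u << 31)

static const char *classify(uint32_t cpsr)
{
	/*
	 * Switching on the raw cpsr (the old code) fails as soon as any
	 * flag bit is set; masking down to the mode field fixes it.
	 */
	switch (cpsr & PSR_AA32_MODE_MASK) {
	case PSR_AA32_MODE_USR: return "usr";
	case PSR_AA32_MODE_SVC: return "svc";
	default:                return "other";
	}
}

int main(void)
{
	uint32_t cpsr = PSR_AA32_MODE_SVC | PSR_AA32_N_BIT;

	/* With the mask, the N flag no longer hides the SVC mode. */
	printf("%s\n", classify(cpsr)); /* prints "svc" */
	return 0;
}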
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index f98cbe2626..449fa58cf3 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
u32 cpsr_cond;
int cond;
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_esr(vcpu) >> 30)
+ /*
+ * These are the exception classes that could fire with a
+ * conditional instruction.
+ */
+ switch (kvm_vcpu_trap_get_class(vcpu)) {
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_FP_ASIMD:
+ case ESR_ELx_EC_CP10_ID:
+ case ESR_ELx_EC_CP14_64:
+ case ESR_ELx_EC_SVC32:
+ break;
+ default:
return true;
+ }
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);
@@ -84,7 +98,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
}
/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
@@ -120,7 +134,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
}
/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
void kvm_skip_instr32(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a038320cdb..e3fcf8c4d5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
clr |= ~hfg & __ ## reg ## _nMASK; \
} while(0)
-#define update_fgt_traps_cs(vcpu, reg, clr, set) \
+#define reg_to_fgt_group_id(reg) \
+ ({ \
+ enum fgt_group_id id; \
+ switch(reg) { \
+ case HFGRTR_EL2: \
+ case HFGWTR_EL2: \
+ id = HFGxTR_GROUP; \
+ break; \
+ case HFGITR_EL2: \
+ id = HFGITR_GROUP; \
+ break; \
+ case HDFGRTR_EL2: \
+ case HDFGWTR_EL2: \
+ id = HDFGRTR_GROUP; \
+ break; \
+ case HAFGRTR_EL2: \
+ id = HAFGRTR_GROUP; \
+ break; \
+ default: \
+ BUILD_BUG_ON(1); \
+ } \
+ \
+ id; \
+ })
+
+#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
+ do { \
+ u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
+ set |= hfg & __ ## reg ## _MASK; \
+ clr |= hfg & __ ## reg ## _nMASK; \
+ } while(0)
+
+#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
do { \
- struct kvm_cpu_context *hctxt = \
- &this_cpu_ptr(&kvm_host_data)->host_ctxt; \
u64 c = 0, s = 0; \
\
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
- compute_clr_set(vcpu, reg, c, s); \
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
+ compute_clr_set(vcpu, reg, c, s); \
+ \
+ compute_undef_clr_set(vcpu, kvm, reg, c, s); \
+ \
s |= set; \
c |= clr; \
if (c || s) { \
@@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
} \
} while(0)
-#define update_fgt_traps(vcpu, reg) \
- update_fgt_traps_cs(vcpu, reg, 0, 0)
+#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
+ update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
/*
* Validate the fine grain trap masks.
@@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void)
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
- u64 r_val, w_val;
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
CHECK_FGT_MASKS(HFGRTR_EL2);
CHECK_FGT_MASKS(HFGWTR_EL2);
@@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
- ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
-
- if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
-
- r_clr |= tmp;
- w_clr |= tmp;
- }
-
- /*
- * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
- */
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- w_set |= HFGxTR_EL2_TCR_EL1_MASK;
-
- if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
- compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
- compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
- }
-
- /* The default to trap everything not handled or supported in KVM. */
- tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
- HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
-
- r_val = __HFGRTR_EL2_nMASK & ~tmp;
- r_val |= r_set;
- r_val &= ~r_clr;
-
- w_val = __HFGWTR_EL2_nMASK & ~tmp;
- w_val |= w_set;
- w_val &= ~w_clr;
-
- write_sysreg_s(r_val, SYS_HFGRTR_EL2);
- write_sysreg_s(w_val, SYS_HFGWTR_EL2);
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return;
-
- update_fgt_traps(vcpu, HFGITR_EL2);
- update_fgt_traps(vcpu, HDFGRTR_EL2);
- update_fgt_traps(vcpu, HDFGWTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
+ cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
+ HFGxTR_EL2_TCR_EL1_MASK : 0);
+ update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
- update_fgt_traps(vcpu, HAFGRTR_EL2);
+ update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
+#define __deactivate_fgt(htcxt, vcpu, kvm, reg) \
+ do { \
+ if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
+ kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
+ write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
+ SYS_ ## reg); \
+ } while(0)
+
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
-
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
- return;
-
- write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
- write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
+ if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
+ else
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
- write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
+ __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
@@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
- u64 hcrx = HCRX_GUEST_FLAGS;
+ u64 hcrx = vcpu->arch.hcrx_el2;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
u64 clr = 0, set = 0;
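
The new compute_undef_clr_set() macro above folds the per-VM fine-grained-undef (FGU) mask into the trap register by polarity: positive bits trap when set, negative ("n"-prefixed) bits trap when clear. A toy restatement, with invented masks standing in for the generated __reg_MASK / __reg_nMASK constants:

#include <stdio.h>
#include <stdint.h>

/*
 * Toy FGT register: bits 0-1 are positive polarity (trap when 1),
 * bits 2-3 are negative polarity (trap when 0). Values invented.
 */
#define REG_MASK  0x3ull
#define REG_nMASK 0xcull

int main(void)
{
	uint64_t fgu = 0x5;	/* want to trap (undef) bits 0 and 2 */
	uint64_t set = 0, clr = 0;

	/* Mirror of compute_undef_clr_set(): route each bit by polarity. */
	set |= fgu & REG_MASK;	/* positive bits must be set to trap */
	clr |= fgu & REG_nMASK;	/* negative bits must be clear to trap */

	uint64_t val = 0;	/* start from the current register value */
	val |= set;
	val &= ~clr;

	printf("set=%#llx clr=%#llx val=%#llx\n",
	       (unsigned long long)set, (unsigned long long)clr,
	       (unsigned long long)val);
	return 0;
}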
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index bb6b571ec6..4be6a7fa00 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
-static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
if (!vcpu)
vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+ return vcpu;
+}
+
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}
+static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
- if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
+ if (ctxt_has_s1pie(ctxt)) {
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
}
@@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
- if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
+ if (ctxt_has_s1pie(ctxt)) {
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 4558c02eb3..7746ea507b 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -31,8 +31,8 @@ static void __debug_save_spe(u64 *pmscr_el1)
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
- write_sysreg_s(0, SYS_PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+ write_sysreg_el1(0, SYS_PMSCR);
isb();
/* Now drain all buffered data to memory */
@@ -48,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+ write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
static void __debug_save_trace(u64 *trfcr_el1)
@@ -63,8 +63,8 @@ static void __debug_save_trace(u64 *trfcr_el1)
* Since access to TRFCR_EL1 is trapped, the guest can't
* modify the filtering set by the host.
*/
- *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
- write_sysreg_s(0, SYS_TRFCR_EL1);
+ *trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
+ write_sysreg_el1(0, SYS_TRFCR);
isb();
/* Drain the trace buffer to memory */
tsb_csync();
@@ -76,7 +76,7 @@ static void __debug_restore_trace(u64 trfcr_el1)
return;
/* Restore trace filter controls */
- write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+ write_sysreg_el1(trfcr_el1, SYS_TRFCR);
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 7693a6757c..135cfb294e 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
- /* Prepare and exit to the host's panic funciton. */
+ /* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index b01a3d1078..8850b591d7 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
start = hyp_memory[i].base;
start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
/*
- * The begining of the hyp_vmemmap region for the current
+ * The beginning of the hyp_vmemmap region for the current
* memblock may already be backed by the page backing the end
* the previous region, so avoid mapping it twice.
*/
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc)
{
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index ce5cef7d73..5a59ef88b6 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -528,7 +528,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
} else {
if (ctx->end - ctx->addr < granule)
return -EINVAL;
@@ -717,15 +717,29 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
kvm_pte_t *ptep)
{
- bool device = prot & KVM_PGTABLE_PROT_DEVICE;
- kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
- KVM_S2_MEMATTR(pgt, NORMAL);
+ kvm_pte_t attr;
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
+ switch (prot & (KVM_PGTABLE_PROT_DEVICE |
+ KVM_PGTABLE_PROT_NORMAL_NC)) {
+ case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
+ return -EINVAL;
+ case KVM_PGTABLE_PROT_DEVICE:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
+ break;
+ case KVM_PGTABLE_PROT_NORMAL_NC:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
+ break;
+ default:
+ attr = KVM_S2_MEMATTR(pgt, NORMAL);
+ }
+
if (!(prot & KVM_PGTABLE_PROT_X))
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
- else if (device)
- return -EINVAL;
if (prot & KVM_PGTABLE_PROT_R)
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
@@ -885,9 +899,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
if (kvm_pte_valid(ctx->old)) {
kvm_clear_pte(ctx->ptep);
- if (!stage2_unmap_defer_tlb_flush(pgt))
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
- ctx->addr, ctx->level);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ TLBI_TTL_UNKNOWN);
+ } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+ ctx->level);
+ }
}
mm_ops->put_page(ctx->ptep);
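
stage2_set_prot_attr() above now treats KVM_PGTABLE_PROT_DEVICE and KVM_PGTABLE_PROT_NORMAL_NC as mutually exclusive and refuses executable mappings of either. The same decision table, restated as a small standalone program (the flag values here are invented):

#include <stdio.h>

enum { PROT_DEVICE = 1, PROT_NORMAL_NC = 2, PROT_X = 4 };

/* Returns the chosen memory attribute name, or NULL for invalid combos. */
static const char *pick_attr(int prot)
{
	switch (prot & (PROT_DEVICE | PROT_NORMAL_NC)) {
	case PROT_DEVICE | PROT_NORMAL_NC:
		return NULL;			/* mutually exclusive */
	case PROT_DEVICE:
		if (prot & PROT_X)
			return NULL;		/* no executable device mem */
		return "DEVICE_nGnRE";
	case PROT_NORMAL_NC:
		if (prot & PROT_X)
			return NULL;		/* no executable NC mem */
		return "NORMAL_NC";
	default:
		return "NORMAL";
	}
}

int main(void)
{
	printf("%s\n", pick_attr(PROT_NORMAL_NC));		/* NORMAL_NC */
	printf("%s\n", pick_attr(PROT_DEVICE | PROT_X)
			? "valid" : "rejected");		/* rejected */
	return 0;
}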
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 8e1e0d5033..a8b9ea4967 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -95,7 +95,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
}
/**
- * __vcpu_put_switch_syregs - Restore host system registers to the physical CPU
+ * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 0bd93a5f21..a640e83984 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
} else {
- /* no need to shuffle FS[4] into DFSR[10] as its 0 */
+ /* no need to shuffle FS[4] into DFSR[10] as it's 0 */
fsr = DFSR_FSC_EXTABT_nLPAE;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 92270acfc0..dc04bc7678 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -305,7 +305,7 @@ static void invalidate_icache_guest_page(void *va, size_t size)
* does.
*/
/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
* @mmu: The KVM stage-2 MMU pointer
* @start: The intermediate physical base address of the range to unmap
* @size: The size of the area to unmap
@@ -805,7 +805,7 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
.pgd = (kvm_pteref_t)kvm->mm->pgd,
.ia_bits = vabits_actual,
.start_level = (KVM_PGTABLE_LAST_LEVEL -
- CONFIG_PGTABLE_LEVELS + 1),
+ ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1),
.mm_ops = &kvm_user_mm_ops,
};
unsigned long flags;
@@ -1381,7 +1381,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int ret = 0;
bool write_fault, writable, force_pte = false;
bool exec_fault, mte_allowed;
- bool device = false;
+ bool device = false, vfio_allow_any_uc = false;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -1472,6 +1472,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn = fault_ipa >> PAGE_SHIFT;
mte_allowed = kvm_vma_mte_allowed(vma);
+ vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
+
/* Don't use the VMA after the unlock -- it may have vanished */
vma = NULL;
@@ -1557,10 +1559,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
- if (device)
- prot |= KVM_PGTABLE_PROT_DEVICE;
- else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
+ if (device) {
+ if (vfio_allow_any_uc)
+ prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+ else
+ prot |= KVM_PGTABLE_PROT_DEVICE;
+ } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
prot |= KVM_PGTABLE_PROT_X;
+ }
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
@@ -1874,16 +1880,9 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
/*
- * The ID map may be configured to use an extended virtual address
- * range. This is only the case if system RAM is out of range for the
- * currently configured page size and VA_BITS_MIN, in which case we will
- * also need the extended virtual range for the HYP ID map, or we won't
- * be able to enable the EL2 MMU.
- *
- * However, in some cases the ID map may be configured for fewer than
- * the number of VA bits used by the regular kernel stage 1. This
- * happens when VA_BITS=52 and the kernel image is placed in PA space
- * below 48 bits.
+ * The ID map is always configured for 48 bits of translation, which
+ * may be fewer than the number of VA bits used by the regular kernel
+ * stage 1, when VA_BITS=52.
*
* At EL2, there is only one TTBR register, and we can't switch between
* translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
@@ -1894,7 +1893,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
* 1 VA bits to assure that the hypervisor can both ID map its code page
* and map any kernel memory.
*/
- idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+ idmap_bits = IDMAP_VA_BITS;
kernel_bits = vabits_actual;
*hyp_va_bits = max(idmap_bits, kernel_bits);
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index ba95d044bc..ced30c9052 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -133,6 +133,13 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
break;
+ case SYS_ID_AA64MMFR4_EL1:
+ val = 0;
+ if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
+ ID_AA64MMFR4_EL1_E2H0_NI_NV1);
+ break;
+
case SYS_ID_AA64DFR0_EL1:
/* Only limited support for PMU, Debug, BPs and WPs */
val &= (NV_FTR(DFR0, PMUVer) |
@@ -156,15 +163,280 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
return val;
}
+
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
+{
+ u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
+ struct kvm_sysreg_masks *masks;
+
+ masks = vcpu->kvm->arch.sysreg_masks;
+
+ if (masks) {
+ sr -= __VNCR_START__;
+
+ v &= ~masks->mask[sr].res0;
+ v |= masks->mask[sr].res1;
+ }
+
+ return v;
+}
+
+static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+{
+ int i = sr - __VNCR_START__;
+
+ kvm->arch.sysreg_masks->mask[i].res0 = res0;
+ kvm->arch.sysreg_masks->mask[i].res1 = res1;
+}
+
int kvm_init_nv_sysregs(struct kvm *kvm)
{
+ u64 res0, res1;
+ int ret = 0;
+
mutex_lock(&kvm->arch.config_lock);
+ if (kvm->arch.sysreg_masks)
+ goto out;
+
+ kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
+ GFP_KERNEL);
+ if (!kvm->arch.sysreg_masks) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
kvm->arch.id_regs[i]);
+ /* VTTBR_EL2 */
+ res0 = res1 = 0;
+ if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
+ res0 |= GENMASK(63, 56);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
+ res0 |= VTTBR_CNP_BIT;
+ set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
+
+ /* VTCR_EL2 */
+ res0 = GENMASK(63, 32) | GENMASK(30, 20);
+ res1 = BIT(31);
+ set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
+
+ /* VMPIDR_EL2 */
+ res0 = GENMASK(63, 40) | GENMASK(30, 24);
+ res1 = BIT(31);
+ set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
+
+ /* HCR_EL2 */
+ res0 = BIT(48);
+ res1 = HCR_RW;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
+ res0 |= GENMASK(63, 59);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
+ res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
+ res0 |= (HCR_TTLBIS | HCR_TTLBOS);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
+ !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
+ res0 |= HCR_ENSCXT;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
+ res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
+ res0 |= HCR_AMVOFFEN;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
+ res0 |= HCR_FIEN;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
+ res0 |= HCR_FWB;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
+ res0 |= HCR_NV2;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
+ res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
+ if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+ __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+ res0 |= (HCR_API | HCR_APK);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
+ res0 |= BIT(39);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
+ res0 |= (HCR_TEA | HCR_TERR);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ res0 |= HCR_TLOR;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
+ res1 |= HCR_E2H;
+ set_sysreg_masks(kvm, HCR_EL2, res0, res1);
+
+ /* HCRX_EL2 */
+ res0 = HCRX_EL2_RES0;
+ res1 = HCRX_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
+ res0 |= HCRX_EL2_PACMEn;
+ if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
+ res0 |= HCRX_EL2_EnFPM;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ res0 |= HCRX_EL2_GCSEn;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
+ res0 |= HCRX_EL2_EnIDCP128;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
+ res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
+ res0 |= HCRX_EL2_TMEA;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
+ res0 |= HCRX_EL2_D128En;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
+ res0 |= HCRX_EL2_PTTWI;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
+ res0 |= HCRX_EL2_SCTLR2En;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
+ res0 |= HCRX_EL2_TCR2En;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+ res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
+ res0 |= HCRX_EL2_CMOW;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
+ res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
+ !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
+ res0 |= HCRX_EL2_SMPME;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
+ res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
+ res0 |= HCRX_EL2_EnASR;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
+ res0 |= HCRX_EL2_EnALS;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
+ res0 |= HCRX_EL2_EnAS0;
+ set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
+
+ /* HFG[RW]TR_EL2 */
+ res0 = res1 = 0;
+ if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+ __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+ res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
+ HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
+ HFGxTR_EL2_APIBKey);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 |
+ HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 |
+ HFGxTR_EL2_LORSA_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
+ !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
+ res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP))
+ res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
+ res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 |
+ HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 |
+ HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 |
+ HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 |
+ HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
+ res0 |= HFGxTR_EL2_nACCDATA_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
+ res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
+ res0 |= HFGxTR_EL2_nRCWMASK_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
+ res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
+ res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
+ res0 |= HFGxTR_EL2_nS2POR_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
+ res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1);
+ set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1);
+ set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1);
+
+ /* HDFG[RW]TR_EL2 */
+ res0 = res1 = 0;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
+ res0 |= HDFGRTR_EL2_OSDLR_EL1;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
+ res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 |
+ HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 |
+ HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN |
+ HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 |
+ HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 |
+ HDFGRTR_EL2_PMCEIDn_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
+ res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 |
+ HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 |
+ HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 |
+ HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 |
+ HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 |
+ HDFGRTR_EL2_PMBIDR_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
+ res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS |
+ HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM |
+ HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID |
+ HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR |
+ HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR |
+ HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR |
+ HDFGRTR_EL2_TRCVICTLR);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
+ res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 |
+ HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 |
+ HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 |
+ HDFGRTR_EL2_TRBTRG_EL1);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
+ res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL |
+ HDFGRTR_EL2_nBRBDATA);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
+ res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1;
+ set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1);
+
+ /* Reuse the bits from the read-side and add the write-specific stuff */
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
+ res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
+ res0 |= HDFGWTR_EL2_TRCOSLAR;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
+ res0 |= HDFGWTR_EL2_TRFCR_EL1;
+ set_sysreg_masks(kvm, HFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1);
+
+ /* HFGITR_EL2 */
+ res0 = HFGITR_EL2_RES0;
+ res1 = HFGITR_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2))
+ res0 |= HFGITR_EL2_DCCVADP;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
+ res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS |
+ HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS |
+ HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS |
+ HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+ res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 |
+ HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 |
+ HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS |
+ HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS |
+ HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP))
+ res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX |
+ HFGITR_EL2_CPPRCTX);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
+ res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL);
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 |
+ HFGITR_EL2_nGCSEPP);
+ if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX))
+ res0 |= HFGITR_EL2_COSPRCTX;
+ if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
+ res0 |= HFGITR_EL2_ATS1E1A;
+ set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
+
+ /* HAFGRTR_EL2 - not a lot to see here */
+ res0 = HAFGRTR_EL2_RES0;
+ res1 = HAFGRTR_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
+ res0 |= ~(res0 | res1);
+ set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
+out:
mutex_unlock(&kvm->arch.config_lock);
- return 0;
+ return ret;
}
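
All of the set_sysreg_masks() calls above feed kvm_vcpu_sanitise_vncr_reg(), whose whole job is two mask operations: clear RES0, force RES1. A worked example with invented masks loosely shaped like the VTCR_EL2 ones:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented masks: bits 63-56 RES0, bit 31 RES1. */
	uint64_t res0 = 0xff00000000000000ull;
	uint64_t res1 = 0x0000000080000000ull;
	uint64_t v    = 0xaa00000000000001ull;	/* guest-written value */

	/* The sanitisation performed by kvm_vcpu_sanitise_vncr_reg(): */
	v &= ~res0;	/* clear everything the VM can't implement */
	v |= res1;	/* force the must-be-one bits */

	printf("%#llx\n", (unsigned long long)v); /* 0x80000001 */
	return 0;
}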
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 3d9467ff73..a35ce10e0a 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
kvm_pmu_event_mask(kvm);
- u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
- if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
mask |= ARMV8_PMU_INCLUDE_EL2;
- if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
ARMV8_PMU_EXCLUDE_NS_EL1 |
ARMV8_PMU_EXCLUDE_EL3;
@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
*/
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+
return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
- kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
+ kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
}
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
@@ -419,7 +420,7 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
kvm_pmu_update_state(vcpu);
}
-/**
+/*
* When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
* to the event.
* This is why we need a callback to do it once outside of the NMI context.
@@ -490,7 +491,7 @@ static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
return val;
}
-/**
+/*
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
return;
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
- if (!kvm_pmu_is_3p5(vcpu))
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP;
/* The reset bits don't indicate any state, and shouldn't be saved. */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 30253bd199..c9f4f38715 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -12,6 +12,7 @@
#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
+#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
@@ -505,10 +506,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r);
- if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
kvm_inject_undefined(vcpu);
return false;
}
@@ -1685,7 +1685,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \
(val) &= ~reg##_##field##_MASK; \
(val) |= FIELD_PREP(reg##_##field##_MASK, \
- min(__f_val, (u64)reg##_##field##_##limit)); \
+ min(__f_val, \
+ (u64)SYS_FIELD_VALUE(reg, field, limit))); \
(val); \
})
@@ -2174,6 +2175,16 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
return true;
}
+static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 val = r->val;
+
+ if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ val |= HCR_E2H;
+
+ return __vcpu_sys_reg(vcpu, r->reg) = val;
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2186,16 +2197,6 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
* guest...
*/
static const struct sys_reg_desc sys_reg_descs[] = {
- { SYS_DESC(SYS_DC_ISW), access_dcsw },
- { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CSW), access_dcsw },
- { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CISW), access_dcsw },
- { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
- { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
-
DBG_BCR_BVR_WCR_WVR_EL1(0),
DBG_BCR_BVR_WCR_WVR_EL1(1),
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
@@ -2349,7 +2350,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR2_EL1_NV |
ID_AA64MMFR2_EL1_CCIDX)),
ID_SANITISED(ID_AA64MMFR3_EL1),
- ID_UNALLOCATED(7,4),
+ ID_SANITISED(ID_AA64MMFR4_EL1),
ID_UNALLOCATED(7,5),
ID_UNALLOCATED(7,6),
ID_UNALLOCATED(7,7),
@@ -2665,7 +2666,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
- EL2_REG_VNCR(HCR_EL2, reset_val, 0),
+ EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
@@ -2727,6 +2728,18 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
+static struct sys_reg_desc sys_insn_descs[] = {
+ { SYS_DESC(SYS_DC_ISW), access_dcsw },
+ { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CSW), access_dcsw },
+ { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CISW), access_dcsw },
+ { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+ { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
+};
+
static const struct sys_reg_desc *first_idreg;
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
@@ -2737,8 +2750,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
} else {
u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
- u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
- u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
+ u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
@@ -3159,7 +3171,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
/**
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
- * @run: The kvm_run struct
+ * @global: &struct sys_reg_desc
+ * @nr_global: size of the @global array
*/
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *global,
@@ -3326,7 +3339,9 @@ static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
/**
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
- * @run: The kvm_run struct
+ * @params: &struct sys_reg_params
+ * @global: &struct sys_reg_desc
+ * @nr_global: size of the @global array
*/
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
@@ -3384,12 +3399,6 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
-static bool is_imp_def_sys_reg(struct sys_reg_params *params)
-{
- // See ARM DDI 0487E.a, section D12.3.2
- return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
-}
-
/**
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
* @vcpu: The VCPU pointer
@@ -3398,26 +3407,106 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
* Return: true if the system register access was successful, false otherwise.
*/
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
- struct sys_reg_params *params)
+ struct sys_reg_params *params)
{
const struct sys_reg_desc *r;
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
-
if (likely(r)) {
perform_access(vcpu, params, r);
return true;
}
- if (is_imp_def_sys_reg(params)) {
- kvm_inject_undefined(vcpu);
+ print_sys_reg_msg(params,
+ "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+ *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
+ kvm_inject_undefined(vcpu);
+
+ return false;
+}
+
+static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+ u8 *iter;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ iter = &kvm->arch.idreg_debugfs_iter;
+ if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
+ *iter == (u8)~0) {
+ *iter = *pos;
+ if (*iter >= KVM_ARM_ID_REG_NUM)
+ iter = NULL;
} else {
- print_sys_reg_msg(params,
- "Unsupported guest sys_reg access at: %lx [%08lx]\n",
- *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
- kvm_inject_undefined(vcpu);
+ iter = ERR_PTR(-EBUSY);
}
- return false;
+
+ mutex_unlock(&kvm->arch.config_lock);
+
+ return iter;
+}
+
+static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+
+ (*pos)++;
+
+ if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) {
+ kvm->arch.idreg_debugfs_iter++;
+
+ return &kvm->arch.idreg_debugfs_iter;
+ }
+
+ return NULL;
+}
+
+static void idregs_debug_stop(struct seq_file *s, void *v)
+{
+ struct kvm *kvm = s->private;
+
+ if (IS_ERR(v))
+ return;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ kvm->arch.idreg_debugfs_iter = ~0;
+
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
+static int idregs_debug_show(struct seq_file *s, void *v)
+{
+ struct kvm *kvm = s->private;
+ const struct sys_reg_desc *desc;
+
+ desc = first_idreg + kvm->arch.idreg_debugfs_iter;
+
+ if (!desc->name)
+ return 0;
+
+ seq_printf(s, "%20s:\t%016llx\n",
+ desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter)));
+
+ return 0;
+}
+
+static const struct seq_operations idregs_debug_sops = {
+ .start = idregs_debug_start,
+ .next = idregs_debug_next,
+ .stop = idregs_debug_stop,
+ .show = idregs_debug_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(idregs_debug);
+
+void kvm_sys_regs_create_debugfs(struct kvm *kvm)
+{
+ kvm->arch.idreg_debugfs_iter = ~0;
+
+ debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
+ &idregs_debug_fops);
}
static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
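
DEFINE_SEQ_ATTRIBUTE(idregs_debug) is what generates the idregs_debug_fops used above, wiring the seq_operations into a file_operations. Roughly what the macro expands to (paraphrased from include/linux/seq_file.h):

static int idregs_debug_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &idregs_debug_sops);

	/*
	 * Forward the data pointer handed to debugfs_create_file() (the
	 * struct kvm here) so the seq ops can fetch it from s->private.
	 */
	if (!ret && inode->i_private) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
	}
	return ret;
}

static const struct file_operations idregs_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= idregs_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

Reading the resulting "idregs" file under the VM's debugfs directory dumps one sanitised ID register per line. Note how the ~0 value of idreg_debugfs_iter doubles as an "iterator idle" sentinel: a concurrent reader finds the iterator busy and gets -EBUSY instead of corrupting the walk.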
@@ -3467,28 +3556,39 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
}
/**
- * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
+ * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
+ * trap taken during guest execution
* @vcpu: The VCPU pointer
*/
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
+ const struct sys_reg_desc *desc = NULL;
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
+ int sr_idx;
trace_kvm_handle_sys_reg(esr);
- if (__check_nv_sr_forward(vcpu))
+ if (triage_sysreg_trap(vcpu, &sr_idx))
return 1;
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
- if (!emulate_sys_reg(vcpu, &params))
- return 1;
+ /* System registers have Op0=={2,3}, as per ARM DDI 0487J.a, C5.1.2 */
+ if (params.Op0 == 2 || params.Op0 == 3)
+ desc = &sys_reg_descs[sr_idx];
+ else
+ desc = &sys_insn_descs[sr_idx];
+
+ perform_access(vcpu, &params, desc);
- if (!params.is_write)
+ /* Read from system register? */
+ if (!params.is_write &&
+ (params.Op0 == 2 || params.Op0 == 3))
vcpu_set_reg(vcpu, Rt, params.regval);
+
return 1;
}
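
esr_sys64_to_params() unpacks the ISS field of the syndrome into a sys_reg_params. For an EC_SYS64 trap the encoding fields sit at fixed offsets; a sketch of the decode (bit positions per the architecture's ESR_ELx.ISS layout; the in-tree helper lives in sys_regs.h):

	params.Op0      = (esr >> 20) & 0x3;
	params.Op2      = (esr >> 17) & 0x7;
	params.Op1      = (esr >> 14) & 0x7;
	params.CRn      = (esr >> 10) & 0xf;
	/* bits [9:5] are Rt, fetched separately via kvm_vcpu_sys_get_rt() */
	params.CRm      = (esr >> 1) & 0xf;
	params.is_write = !(esr & 1);	/* Direction: 0 = write, 1 = read */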
@@ -3930,11 +4030,84 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
return 0;
}
+void kvm_init_sysreg(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ /*
+ * In the absence of FGT, we cannot independently trap TLBI
+ * Range instructions. This isn't great, but trapping all
+ * TLBIs would be far worse. Live with it...
+ */
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ vcpu->arch.hcr_el2 |= HCR_TTLBOS;
+
+ if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+ vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+ vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+ }
+
+ if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
+ goto out;
+
+ kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
+ HFGxTR_EL2_nMAIR2_EL1 |
+ HFGxTR_EL2_nS2POR_EL1 |
+ HFGxTR_EL2_nPOR_EL1 |
+ HFGxTR_EL2_nPOR_EL0 |
+ HFGxTR_EL2_nACCDATA_EL1 |
+ HFGxTR_EL2_nSMPRI_EL1_MASK |
+ HFGxTR_EL2_nTPIDR2_EL0_MASK);
+
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+ kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
+ HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS |
+ HFGITR_EL2_TLBIRVAE1OS |
+ HFGITR_EL2_TLBIVAALE1OS |
+ HFGITR_EL2_TLBIVALE1OS |
+ HFGITR_EL2_TLBIVAAE1OS |
+ HFGITR_EL2_TLBIASIDE1OS |
+ HFGITR_EL2_TLBIVAE1OS |
+ HFGITR_EL2_TLBIVMALLE1OS);
+
+ if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+ kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
+ HFGITR_EL2_TLBIRVALE1 |
+ HFGITR_EL2_TLBIRVAAE1 |
+ HFGITR_EL2_TLBIRVAE1 |
+ HFGITR_EL2_TLBIRVAALE1IS|
+ HFGITR_EL2_TLBIRVALE1IS |
+ HFGITR_EL2_TLBIRVAAE1IS |
+ HFGITR_EL2_TLBIRVAE1IS |
+ HFGITR_EL2_TLBIRVAALE1OS|
+ HFGITR_EL2_TLBIRVALE1OS |
+ HFGITR_EL2_TLBIRVAAE1OS |
+ HFGITR_EL2_TLBIRVAE1OS);
+
+ if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
+ kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
+ HFGxTR_EL2_nPIR_EL1);
+
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
+ kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
+ HAFGRTR_EL2_RES1);
+
+ set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
+out:
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
int __init kvm_sys_reg_table_init(void)
{
struct sys_reg_params params;
bool valid = true;
unsigned int i;
+ int ret = 0;
/* Make sure tables are unique and in order. */
valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
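
The fgu[] masks populated by kvm_init_sysreg() above implement "fine-grained undef": FGT trap bits for features the VM was configured without. They are folded into the fine-grained trap registers when the vCPU is loaded, and a trap matching one of them is resolved by injecting an UNDEF rather than by emulation. A conceptual sketch of the trap-time test (trap_is_fgu() is a hypothetical helper; the real check sits in emulate-nested.c's triage path):

/* Conceptual only: a trapped access whose FGT bit is in the FGU mask
 * belongs to a feature hidden from the guest, so it UNDEFs. */
static bool trap_is_fgu(struct kvm *kvm, enum fgt_group_id group, u64 fgt_bit)
{
	return kvm->arch.fgu[group] & fgt_bit;
}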
@@ -3943,6 +4116,7 @@ int __init kvm_sys_reg_table_init(void)
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
+ valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
if (!valid)
return -EINVAL;
@@ -3957,8 +4131,13 @@ int __init kvm_sys_reg_table_init(void)
if (!first_idreg)
return -EINVAL;
- if (kvm_get_mode() == KVM_MODE_NV)
- return populate_nv_trap_config();
+ ret = populate_nv_trap_config();
- return 0;
+ for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
+ ret = populate_sysreg_config(sys_reg_descs + i, i);
+
+ for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
+ ret = populate_sysreg_config(sys_insn_descs + i, i);
+
+ return ret;
}
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index c65c129b35..997eea21ba 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -233,6 +233,8 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num);
+bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
+
#define AA32(_x) .aarch32_map = AA32_##_x
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index 85606a531d..389025ce77 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -149,7 +149,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
if (v3)
- seq_printf(s, "nr_lpis:\t%d\n", dist->lpi_list_count);
+ seq_printf(s, "nr_lpis:\t%d\n", atomic_read(&dist->lpi_count));
seq_printf(s, "enabled:\t%d\n", dist->enabled);
seq_printf(s, "\n");
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index e949e1d0fd..ce3bcff34b 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -53,9 +53,9 @@ void kvm_vgic_early_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- INIT_LIST_HEAD(&dist->lpi_list_head);
INIT_LIST_HEAD(&dist->lpi_translation_cache);
raw_spin_lock_init(&dist->lpi_list_lock);
+ xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}
/* CREATION */
@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
vgic_lpi_translation_cache_init(kvm);
/*
- * If we have GICv4.1 enabled, unconditionnaly request enable the
+ * If we have GICv4.1 enabled, unconditionally request enabling the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only
* enable it if we present a virtual ITS to the guest.
*/
@@ -355,7 +355,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
@@ -366,6 +366,8 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (vgic_supports_direct_msis(kvm))
vgic_v4_teardown(kvm);
+
+ xa_destroy(&dist->lpi_xa);
}
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -445,13 +447,15 @@ int vgic_lazy_init(struct kvm *kvm)
/* RESOURCE MAPPING */
/**
+ * kvm_vgic_map_resources - map the MMIO regions
+ * @kvm: kvm struct pointer
+ *
 * Map the MMIO regions depending on the VGIC model exposed to the guest;
 * called on the first VCPU run.
* Also map the virtual CPU interface into the VM.
* v2 calls vgic_init() if not already done.
* v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
- * @kvm: kvm struct pointer
*/
int kvm_vgic_map_resources(struct kvm *kvm)
{
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 28a93074ec..e85a495ada 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -52,7 +52,12 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
if (!irq)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&irq->lpi_list);
+ ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+ if (ret) {
+ kfree(irq);
+ return ERR_PTR(ret);
+ }
+
INIT_LIST_HEAD(&irq->ap_list);
raw_spin_lock_init(&irq->irq_lock);
@@ -68,30 +73,30 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
* There could be a race with another vgic_add_lpi(), so we need to
* check that we don't add a second list entry with the same LPI.
*/
- list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
- if (oldirq->intid != intid)
- continue;
-
+ oldirq = xa_load(&dist->lpi_xa, intid);
+ if (vgic_try_get_irq_kref(oldirq)) {
 /* Someone was faster adding this LPI, let's use that. */
kfree(irq);
irq = oldirq;
- /*
- * This increases the refcount, the caller is expected to
- * call vgic_put_irq() on the returned pointer once it's
- * finished with the IRQ.
- */
- vgic_get_irq_kref(irq);
+ goto out_unlock;
+ }
+ ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+ if (ret) {
+ xa_release(&dist->lpi_xa, intid);
+ kfree(irq);
goto out_unlock;
}
- list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
- dist->lpi_list_count++;
+ atomic_inc(&dist->lpi_count);
out_unlock:
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
/*
* We "cache" the configuration table entries in our struct vgic_irq's.
* However we only have those structs for mapped IRQs, so we read in
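
The vgic_add_lpi() rework above keeps the old optimistic "allocate, then detect a racing winner" structure, restated with xarray primitives: reserve the slot before taking the lock (so the store itself cannot fail with -ENOMEM), re-check for a concurrent insert, and drop the reservation on failure. The bare pattern, as a self-contained sketch (struct obj, obj_xa and obj_lock are hypothetical stand-ins for the vgic types):

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct obj {
	struct kref refcount;
	u32 id;
};

static DEFINE_XARRAY_FLAGS(obj_xa, XA_FLAGS_LOCK_IRQ);
static DEFINE_RAW_SPINLOCK(obj_lock);

static struct obj *obj_add(u32 id)
{
	struct obj *new, *old;
	unsigned long flags;
	int ret;

	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
	if (!new)
		return ERR_PTR(-ENOMEM);
	kref_init(&new->refcount);
	new->id = id;

	/* Pre-reserve the slot so the store below cannot hit -ENOMEM. */
	ret = xa_reserve_irq(&obj_xa, id, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	raw_spin_lock_irqsave(&obj_lock, flags);

	old = xa_load(&obj_xa, id);
	if (old && kref_get_unless_zero(&old->refcount)) {
		/* Lost the race: free ours, return the winner's entry. */
		kfree(new);
		new = old;
		goto out_unlock;
	}

	ret = xa_err(xa_store(&obj_xa, id, new, 0)); /* 0: slot reserved */
	if (ret) {
		xa_release(&obj_xa, id);
		kfree(new);
	}

out_unlock:
	raw_spin_unlock_irqrestore(&obj_lock, flags);

	return ret ? ERR_PTR(ret) : new;
}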
@@ -158,7 +163,7 @@ struct vgic_translation_cache_entry {
* @cte_esz: collection table entry size
* @dte_esz: device table entry size
* @ite_esz: interrupt translation table entry size
- * @save tables: save the ITS tables into guest RAM
+ * @save_tables: save the ITS tables into guest RAM
* @restore_tables: restore the ITS internal structs from tables
* stored in guest RAM
* @commit: initialize the registers which expose the ABI settings,
@@ -311,6 +316,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
return 0;
}
+#define GIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)
+
/*
* Create a snapshot of the current LPIs targeting @vcpu, so that we can
* enumerate those LPIs without holding any lock.
@@ -319,6 +326,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
struct vgic_dist *dist = &kvm->arch.vgic;
+ XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
struct vgic_irq *irq;
unsigned long flags;
u32 *intids;
@@ -331,13 +339,15 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
* command). If coming from another path (such as enabling LPIs),
* we must be careful not to overrun the array.
*/
- irq_count = READ_ONCE(dist->lpi_list_count);
+ irq_count = atomic_read(&dist->lpi_count);
intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
if (!intids)
return -ENOMEM;
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ rcu_read_lock();
+
+ xas_for_each(&xas, irq, GIC_LPI_MAX_INTID) {
if (i == irq_count)
break;
/* We don't need to "get" the IRQ, as we hold the list lock. */
@@ -345,6 +355,8 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}
+
+ rcu_read_unlock();
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
*intid_ptr = intids;
@@ -595,8 +607,8 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
irq = __vgic_its_check_cache(dist, db, devid, eventid);
- if (irq)
- vgic_get_irq_kref(irq);
+ if (!vgic_try_get_irq_kref(irq))
+ irq = NULL;
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
@@ -640,8 +652,13 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
* was in the cache, and increment it on the new interrupt.
*/
if (cte->irq)
- __vgic_put_lpi_locked(kvm, cte->irq);
+ vgic_put_irq(kvm, cte->irq);
+ /*
+ * The irq refcount is guaranteed to be nonzero while holding the
+ * its_lock, as the ITE (and the reference it holds) cannot be freed.
+ */
+ lockdep_assert_held(&its->its_lock);
vgic_get_irq_kref(irq);
cte->db = db;
@@ -672,7 +689,7 @@ void vgic_its_invalidate_cache(struct kvm *kvm)
if (!cte->irq)
break;
- __vgic_put_lpi_locked(kvm, cte->irq);
+ vgic_put_irq(kvm, cte->irq);
cte->irq = NULL;
}
@@ -1345,8 +1362,8 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
}
/**
- * vgic_its_invall - invalidate all LPIs targetting a given vcpu
- * @vcpu: the vcpu for which the RD is targetted by an invalidation
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
*
* Contrary to the INVALL command, this targets a RD instead of a
* collection, and we don't need to hold the its_lock, since no ITS is
@@ -2144,7 +2161,7 @@ static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
}
/**
- * entry_fn_t - Callback called on a table entry restore path
+ * typedef entry_fn_t - Callback called on a table entry restore path
* @its: its handle
* @id: id of the entry
* @entry: pointer to the entry
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index c15ee1df03..dad60b1e21 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -919,8 +919,19 @@ free:
return ret;
}
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
+ struct kvm_vcpu *vcpu;
+ unsigned long c;
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ /* Garbage collect the region */
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (vcpu->arch.vgic_cpu.rdreg == rdreg)
+ vcpu->arch.vgic_cpu.rdreg = NULL;
+ }
+
list_del(&rdreg->list);
kfree(rdreg);
}
@@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9465d3706a..4ea3340786 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -380,6 +380,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
struct vgic_irq *irq;
gpa_t last_ptr = ~(gpa_t)0;
bool vlpi_avail = false;
+ unsigned long index;
int ret = 0;
u8 val;
@@ -396,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
vlpi_avail = true;
}
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ xa_for_each(&dist->lpi_xa, index, irq) {
int byte_offset, bit_nr;
struct kvm_vcpu *vcpu;
gpa_t pendbase, ptr;
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index db2a95762b..4ec93587c8 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -30,7 +30,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
- * vgic_irq->irq_lock must be taken with IRQs disabled
+ * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower
@@ -54,32 +55,22 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
*/
/*
- * Iterate over the VM's list of mapped LPIs to find the one with a
- * matching interrupt ID and return a reference to the IRQ structure.
+ * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
+ * structure. The caller is expected to call vgic_put_irq() later once it's
+ * finished with the IRQ.
*/
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
- if (irq->intid != intid)
- continue;
+ rcu_read_lock();
- /*
- * This increases the refcount, the caller is expected to
- * call vgic_put_irq() later once it's finished with the IRQ.
- */
- vgic_get_irq_kref(irq);
- goto out_unlock;
- }
- irq = NULL;
+ irq = xa_load(&dist->lpi_xa, intid);
+ if (!vgic_try_get_irq_kref(irq))
+ irq = NULL;
-out_unlock:
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ rcu_read_unlock();
return irq;
}
@@ -120,22 +111,6 @@ static void vgic_irq_release(struct kref *ref)
{
}
-/*
- * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
- */
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- if (!kref_put(&irq->refcount, vgic_irq_release))
- return;
-
- list_del(&irq->lpi_list);
- dist->lpi_list_count--;
-
- kfree(irq);
-}
-
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -144,9 +119,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI)
return;
- raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
- __vgic_put_lpi_locked(kvm, irq);
- raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ if (!kref_put(&irq->refcount, vgic_irq_release))
+ return;
+
+ xa_lock_irqsave(&dist->lpi_xa, flags);
+ __xa_erase(&dist->lpi_xa, irq->intid);
+ xa_unlock_irqrestore(&dist->lpi_xa, flags);
+
+ atomic_dec(&dist->lpi_count);
+ kfree_rcu(irq, rcu);
}
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
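
Both halves of the new LPI lifetime rules are now visible: the lookup side (vgic_get_lpi() above) runs under rcu_read_lock() and takes its reference with kref_get_unless_zero(), while the release side (vgic_put_irq()) unpublishes the entry from the xarray before handing it to kfree_rcu(). Condensed, the invariant pairs up like this (generic sketch; obj, xa and release_noop are hypothetical, and obj embeds a struct rcu_head named rcu):

	/* Read side: no lock. RCU keeps *obj alive; the kref decides usability. */
	rcu_read_lock();
	obj = xa_load(&xa, id);
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;		/* final put already ran */
	rcu_read_unlock();

	/* Release side: unpublish first, free only after a grace period. */
	if (kref_put(&obj->refcount, release_noop)) {
		xa_lock_irqsave(&xa, flags);
		__xa_erase(&xa, obj->id);
		xa_unlock_irqrestore(&xa, flags);
		kfree_rcu(obj, rcu);
	}

Readers that raced with the final kref_put() either fail kref_get_unless_zero() or still hold a valid pointer until the grace period ends; either way nothing is dereferenced after the free.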
@@ -203,7 +184,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
}
/**
- * kvm_vgic_target_oracle - compute the target vcpu for an irq
+ * vgic_target_oracle - compute the target vcpu for an irq
*
* @irq: The irq to route. Must be already locked.
*
@@ -404,7 +385,8 @@ retry:
/*
* Grab a reference to the irq to reflect the fact that it is
- * now in the ap_list.
+ * now in the ap_list. This is safe as the caller must already hold a
+ * reference on the irq.
*/
vgic_get_irq_kref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 8d134569d0..08b4c09a08 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -180,7 +180,6 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid);
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
@@ -220,12 +219,20 @@ void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
{
+ if (!irq)
+ return false;
+
if (irq->intid < VGIC_MIN_LPI)
- return;
+ return true;
- kref_get(&irq->refcount);
+ return kref_get_unless_zero(&irq->refcount);
+}
+
+static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+{
+ WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
}
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
@@ -310,7 +317,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);