Diffstat (limited to 'include/kvm')
-rw-r--r--	include/kvm/arm_pmu.h	11
-rw-r--r--	include/kvm/arm_vgic.h	9
2 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index df32355e3e..35d4ca4f61 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -90,16 +90,6 @@ void kvm_vcpu_pmu_resync_el0(void);
 		vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
 	} while (0)
 
-/*
- * Evaluates as true when emulating PMUv3p5, and false otherwise.
- */
-#define kvm_pmu_is_3p5(vcpu) ({ \
-	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
-	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
-	\
-	pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
-})
-
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 int kvm_arm_set_default_pmu(struct kvm *kvm);
@@ -168,7 +158,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 }
 
 #define kvm_vcpu_has_pmu(vcpu) ({ false; })
-#define kvm_pmu_is_3p5(vcpu) ({ false; })
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
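Note: the macro removed above evaluated whether the vCPU's PMU emulates PMUv3p5 by reading the VM-wide sanitised ID_AA64DFR0_EL1 register. For reference, a minimal sketch of the same check as a static inline, reusing the IDREG() and SYS_FIELD_GET() helpers visible in the deleted hunk (the function name here is hypothetical, not part of this patch):

static inline bool vcpu_pmu_is_3p5(struct kvm_vcpu *vcpu)
{
	/* Read the VM-wide sanitised ID register and extract PMUVer. */
	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/* PMUv3p5 and later feature levels satisfy the check. */
	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
}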
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 8cc38e836f..4703594664 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 #include <kvm/iodev.h>
 #include <linux/list.h>
 #include <linux/jump_label.h>
@@ -116,7 +117,7 @@ struct irq_ops {
 
 struct vgic_irq {
 	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
-	struct list_head lpi_list;	/* Used to link all LPIs together */
+	struct rcu_head rcu;
 	struct list_head ap_list;
 
 	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
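Note: swapping lpi_list for an rcu_head suggests vgic_irq objects are now reclaimed only after an RCU grace period, so lockless readers can safely dereference entries found in the new xarray. A sketch of the teardown pattern the field enables (vgic_irq_release() is a hypothetical helper, not necessarily the actual vgic code):

static void vgic_irq_release(struct vgic_irq *irq)
{
	/*
	 * Defer the free past an RCU grace period so concurrent
	 * rcu_read_lock() sections that still hold a pointer to this
	 * IRQ never observe freed memory. Uses the new 'rcu' field.
	 */
	kfree_rcu(irq, rcu);
}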
@@ -273,10 +274,10 @@ struct vgic_dist {
 	 */
 	u64			propbaser;
 
-	/* Protects the lpi_list and the count value below. */
+	/* Protects the lpi_list. */
 	raw_spinlock_t		lpi_list_lock;
-	struct list_head	lpi_list_head;
-	int			lpi_list_count;
+	struct xarray		lpi_xa;
+	atomic_t		lpi_count;
 
 	/* LPI translation cache */
 	struct list_head	lpi_translation_cache;
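Note: the list head and signed counter give way to an xarray indexed by INTID plus an atomic count, which lets lookups proceed by key rather than by walking a list and decouples the counter from lpi_list_lock. A minimal sketch of how an LPI entry might be installed under this scheme, assuming sleepable (GFP_KERNEL) context; vgic_add_lpi_entry() is a hypothetical helper and the real vgic code may differ:

static int vgic_add_lpi_entry(struct vgic_dist *dist, u32 intid,
			      struct vgic_irq *irq)
{
	/* xa_store() returns the old entry, or an xa_err()-encoded error. */
	int ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, GFP_KERNEL));

	if (ret)
		return ret;

	/* Atomic counter: no longer serialised by lpi_list_lock. */
	atomic_inc(&dist->lpi_count);
	return 0;
}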