author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /arch/riscv/kvm/vcpu_onereg.c
parent     Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/kvm/vcpu_onereg.c')
-rw-r--r--  arch/riscv/kvm/vcpu_onereg.c | 74
1 file changed, 70 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index b7e0e03c69..f8c9fa0c03 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>
@@ -34,6 +34,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
/* Multi letter extensions (alphabetically sorted) */
+ KVM_ISA_EXT_ARR(SMSTATEEN),
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
@@ -45,6 +46,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICNTR),
+ KVM_ISA_EXT_ARR(ZICOND),
KVM_ISA_EXT_ARR(ZICSR),
KVM_ISA_EXT_ARR(ZIFENCEI),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
@@ -80,11 +82,11 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
switch (ext) {
+ /* Extensions which don't have any mechanism to disable */
case KVM_RISCV_ISA_EXT_A:
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
- case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
@@ -92,11 +94,15 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZBB:
case KVM_RISCV_ISA_EXT_ZBS:
case KVM_RISCV_ISA_EXT_ZICNTR:
+ case KVM_RISCV_ISA_EXT_ZICOND:
case KVM_RISCV_ISA_EXT_ZICSR:
case KVM_RISCV_ISA_EXT_ZIFENCEI:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZIHPM:
return false;
+ /* Extensions which can be disabled using Smstateen */
+ case KVM_RISCV_ISA_EXT_SSAIA:
+ return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
default:
break;
}
@@ -378,6 +384,34 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
return 0;
}
+static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long reg_val)
+{
+ struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
+ sizeof(unsigned long))
+ return -EINVAL;
+
+ ((unsigned long *)csr)[reg_num] = reg_val;
+ return 0;
+}
+
+static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val)
+{
+ struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
+ sizeof(unsigned long))
+ return -EINVAL;
+
+ *out_val = ((unsigned long *)csr)[reg_num];
+ return 0;
+}
+
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
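[Note on the hunk above] The two helpers index struct kvm_riscv_smstateen_csr as a flat array of unsigned long, so reg_num is simply the word offset of a field in the UAPI struct, and any offset past the end of the struct is rejected with -EINVAL. A minimal sketch of the mapping, assuming the single-word UAPI layout that the bounds check implies; the struct body and the _REG() helper below are reconstructions for illustration, not part of this patch:

#include <stddef.h>	/* offsetof */

/* Assumed UAPI layout (arch/riscv/include/uapi/asm/kvm.h): one word, sstateen0. */
struct kvm_riscv_smstateen_csr {
	unsigned long sstateen0;
};

/* Word-offset helper in the style of the existing KVM_REG_RISCV_CSR_REG().
 * With the layout above, sstateen0 maps to reg_num 0 and reg_num >= 1 fails
 * the bounds check in the get/set helpers. */
#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \
	(offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))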
@@ -401,6 +435,12 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
break;
+ case KVM_REG_RISCV_CSR_SMSTATEEN:
+ rc = -EINVAL;
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+ rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
+ &reg_val);
+ break;
default:
rc = -ENOENT;
break;
@@ -440,6 +480,12 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
break;
+ case KVM_REG_RISCV_CSR_SMSTATEEN:
+ rc = -EINVAL;
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+ rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
+ reg_val);
+ break;
default:
rc = -ENOENT;
break;
@@ -696,6 +742,8 @@ static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+ if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+ n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
return n;
}
@@ -704,7 +752,7 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
- int n2 = 0;
+ int n2 = 0, n3 = 0;
/* copy general csr regs */
for (int i = 0; i < n1; i++) {
@@ -738,7 +786,25 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
}
}
- return n1 + n2;
+ /* copy Smstateen csr regs */
+ if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
+ n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
+
+ for (int i = 0; i < n3; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+ KVM_REG_RISCV_CSR_SMSTATEEN | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+ }
+
+ return n1 + n2 + n3;
}
static inline unsigned long num_timer_regs(void)
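Taken together, the hunks above expose one new CSR group to userspace: the Smstateen CSRs are counted by num_csr_regs(), enumerated by copy_csr_reg_indices() for KVM_GET_REG_LIST, and accessed through KVM_GET_ONE_REG / KVM_SET_ONE_REG with the KVM_REG_RISCV_CSR_SMSTATEEN subtype. A rough userspace sketch of a read, building the register id the same way copy_csr_reg_indices() does (64-bit host assumed; index 0 for sstateen0 is an assumption based on the single-field UAPI struct):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* KVM_GET_ONE_REG, KVM_REG_RISCV, KVM_REG_SIZE_U64 */
#include <asm/kvm.h>	/* KVM_REG_RISCV_CSR, KVM_REG_RISCV_CSR_SMSTATEEN */

/* Read the guest's sstateen0 via the ONE_REG interface.
 * vcpu_fd is an already created KVM vCPU file descriptor. */
static int read_guest_sstateen0(int vcpu_fd, uint64_t *val)
{
	/* id layout mirrors copy_csr_reg_indices() above:
	 * arch | size | type (CSR) | subtype (SMSTATEEN) | index.
	 * A 32-bit host would use KVM_REG_SIZE_U32 instead. */
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN |
		      0 /* word 0 of the Smstateen block: sstateen0 */,
		.addr = (uint64_t)(unsigned long)val,
	};

	/* Per the checks added above, this fails with errno == EINVAL when
	 * the host does not implement Smstateen. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

In practice a VMM would enumerate KVM_GET_REG_LIST first and only touch ids that appear there, which avoids having to guess whether the host provides Smstateen at all.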