Diffstat (limited to 'arch/riscv/kvm')
-rw-r--r--   arch/riscv/kvm/aia.c              |   2
-rw-r--r--   arch/riscv/kvm/main.c             |   2
-rw-r--r--   arch/riscv/kvm/tlb.c              |   2
-rw-r--r--   arch/riscv/kvm/vcpu.c             |  76
-rw-r--r--   arch/riscv/kvm/vcpu_fp.c          |   2
-rw-r--r--   arch/riscv/kvm/vcpu_onereg.c      |  74
-rw-r--r--   arch/riscv/kvm/vcpu_sbi.c         |  61
-rw-r--r--   arch/riscv/kvm/vcpu_sbi_replace.c |  32
-rw-r--r--   arch/riscv/kvm/vcpu_vector.c      |   2
9 files changed, 202 insertions, 51 deletions
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 74bb274405..a944294f6f 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -14,7 +14,7 @@
 #include <linux/kvm_host.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm_aia_imsic.h>
 
 struct aia_hgei_control {
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 48ae0d4b39..225a435d9c 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -11,7 +11,7 @@
 #include <linux/module.h>
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/sbi.h>
 
 long kvm_arch_dev_ioctl(struct file *filp,
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 44bc324aee..23c0e82b51 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -12,7 +12,7 @@
 #include <linux/kvm_host.h>
 #include <asm/cacheflush.h>
 #include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/insn-def.h>
 
 #define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 82229db1ce..e087c80907 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -141,6 +141,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	if (rc)
 		return rc;
 
+	/*
+	 * Setup SBI extensions
+	 * NOTE: This must be the last thing to be initialized.
+	 */
+	kvm_riscv_vcpu_sbi_init(vcpu);
+
 	/* Reset VCPU */
 	kvm_riscv_reset_vcpu(vcpu);
 
@@ -471,31 +477,38 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
-static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
+static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
 {
-	u64 henvcfg = 0;
+	const unsigned long *isa = vcpu->arch.isa;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	if (riscv_isa_extension_available(isa, SVPBMT))
-		henvcfg |= ENVCFG_PBMTE;
+		cfg->henvcfg |= ENVCFG_PBMTE;
 
 	if (riscv_isa_extension_available(isa, SSTC))
-		henvcfg |= ENVCFG_STCE;
+		cfg->henvcfg |= ENVCFG_STCE;
 
 	if (riscv_isa_extension_available(isa, ZICBOM))
-		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
 
 	if (riscv_isa_extension_available(isa, ZICBOZ))
-		henvcfg |= ENVCFG_CBZE;
-
-	csr_write(CSR_HENVCFG, henvcfg);
-#ifdef CONFIG_32BIT
-	csr_write(CSR_HENVCFGH, henvcfg >> 32);
-#endif
+		cfg->henvcfg |= ENVCFG_CBZE;
+
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
+		if (riscv_isa_extension_available(isa, SSAIA))
+			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
+					  SMSTATEEN0_AIA |
+					  SMSTATEEN0_AIA_ISEL;
+		if (riscv_isa_extension_available(isa, SMSTATEEN))
+			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
+	}
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	csr_write(CSR_VSSTATUS, csr->vsstatus);
 	csr_write(CSR_VSIE, csr->vsie);
@@ -506,8 +519,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	csr_write(CSR_VSTVAL, csr->vstval);
 	csr_write(CSR_HVIP, csr->hvip);
 	csr_write(CSR_VSATP, csr->vsatp);
-
-	kvm_riscv_vcpu_update_config(vcpu->arch.isa);
+	csr_write(CSR_HENVCFG, cfg->henvcfg);
+	if (IS_ENABLED(CONFIG_32BIT))
+		csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+		csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+		if (IS_ENABLED(CONFIG_32BIT))
+			csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+	}
 
 	kvm_riscv_gstage_update_hgatp(vcpu);
 
@@ -606,6 +625,32 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
+static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+						     smcsr->sstateen0);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+					    vcpu->arch.host_sstateen0);
+}
+
 /*
  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
  * the vCPU is running.
@@ -615,10 +660,12 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
  */
 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
+	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
 	guest_state_enter_irqoff();
 	__kvm_riscv_switch_to(&vcpu->arch);
 	vcpu->arch.last_exit_cpu = vcpu->cpu;
 	guest_state_exit_irqoff();
+	kvm_riscv_vcpu_swap_in_host_state(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -627,6 +674,9 @@
 	struct kvm_cpu_trap trap;
 	struct kvm_run *run = vcpu->run;
 
+	if (!vcpu->arch.ran_atleast_once)
+		kvm_riscv_vcpu_setup_config(vcpu);
+
 	/* Mark this VCPU ran at least once */
 	vcpu->arch.ran_atleast_once = true;
 
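The vcpu.c hunks above use per-vCPU state declared in arch/riscv/include/asm/kvm_host.h, which this diffstat (limited to arch/riscv/kvm) does not include. For orientation only, the companion header change in this series adds declarations along these lines; the field names match their users above, but treat the exact layout as an assumption:

	/* Sketch of the supporting declarations (not part of this diff) */
	struct kvm_vcpu_config {
		u64 henvcfg;
		u64 hstateen0;
	};

	struct kvm_vcpu_smstateen_csr {
		unsigned long sstateen0;
	};

	/* added to struct kvm_vcpu_csr: */
	unsigned long senvcfg;

	/* added to struct kvm_vcpu_arch: */
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;
	struct kvm_vcpu_config cfg;
	struct kvm_vcpu_smstateen_csr smstateen_csr;

The resulting flow: kvm_riscv_vcpu_setup_config() computes cfg once before the first run, kvm_arch_vcpu_load() programs HENVCFG/HSTATEEN0 from it on every load, and the swap_in_guest/host helpers csr_swap() the supervisor-owned SENVCFG/SSTATEEN0 around each guest entry.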
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 08ba48a395..030904d82b 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -11,7 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 
 #ifdef CONFIG_FPU
 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index b7e0e03c69..f8c9fa0c03 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -13,7 +13,7 @@
 #include <linux/uaccess.h>
 #include <linux/kvm_host.h>
 #include <asm/cacheflush.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm_vcpu_vector.h>
 #include <asm/vector.h>
 
@@ -34,6 +34,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
 	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
 	/* Multi letter extensions (alphabetically sorted) */
+	KVM_ISA_EXT_ARR(SMSTATEEN),
 	KVM_ISA_EXT_ARR(SSAIA),
 	KVM_ISA_EXT_ARR(SSTC),
 	KVM_ISA_EXT_ARR(SVINVAL),
@@ -45,6 +46,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	KVM_ISA_EXT_ARR(ZICBOM),
 	KVM_ISA_EXT_ARR(ZICBOZ),
 	KVM_ISA_EXT_ARR(ZICNTR),
+	KVM_ISA_EXT_ARR(ZICOND),
 	KVM_ISA_EXT_ARR(ZICSR),
 	KVM_ISA_EXT_ARR(ZIFENCEI),
 	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
@@ -80,11 +82,11 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
 static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 {
 	switch (ext) {
+	/* Extensions which don't have any mechanism to disable */
 	case KVM_RISCV_ISA_EXT_A:
 	case KVM_RISCV_ISA_EXT_C:
 	case KVM_RISCV_ISA_EXT_I:
 	case KVM_RISCV_ISA_EXT_M:
-	case KVM_RISCV_ISA_EXT_SSAIA:
 	case KVM_RISCV_ISA_EXT_SSTC:
 	case KVM_RISCV_ISA_EXT_SVINVAL:
 	case KVM_RISCV_ISA_EXT_SVNAPOT:
@@ -92,11 +94,15 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_ZBB:
 	case KVM_RISCV_ISA_EXT_ZBS:
 	case KVM_RISCV_ISA_EXT_ZICNTR:
+	case KVM_RISCV_ISA_EXT_ZICOND:
 	case KVM_RISCV_ISA_EXT_ZICSR:
 	case KVM_RISCV_ISA_EXT_ZIFENCEI:
 	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
 	case KVM_RISCV_ISA_EXT_ZIHPM:
 		return false;
+	/* Extensions which can be disabled using Smstateen */
+	case KVM_RISCV_ISA_EXT_SSAIA:
+		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
 	default:
 		break;
 	}
@@ -378,6 +384,34 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
+						   unsigned long reg_num,
+						   unsigned long reg_val)
+{
+	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+
+	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
+		       sizeof(unsigned long))
+		return -EINVAL;
+
+	((unsigned long *)csr)[reg_num] = reg_val;
+	return 0;
+}
+
+static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
+					    unsigned long reg_num,
+					    unsigned long *out_val)
+{
+	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+
+	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
+		       sizeof(unsigned long))
+		return -EINVAL;
+
+	*out_val = ((unsigned long *)csr)[reg_num];
+	return 0;
+}
+
 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
 				      const struct kvm_one_reg *reg)
 {
@@ -401,6 +435,12 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
 	case KVM_REG_RISCV_CSR_AIA:
 		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
 		break;
+	case KVM_REG_RISCV_CSR_SMSTATEEN:
+		rc = -EINVAL;
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
+							      &reg_val);
+		break;
 	default:
 		rc = -ENOENT;
 		break;
@@ -440,6 +480,12 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
 	case KVM_REG_RISCV_CSR_AIA:
 		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
 		break;
+	case KVM_REG_RISCV_CSR_SMSTATEEN:
+		rc = -EINVAL;
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
+			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
+							      reg_val);
+		break;
 	default:
 		rc = -ENOENT;
 		break;
@@ -696,6 +742,8 @@ static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
 	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
 		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
 
 	return n;
 }
@@ -704,7 +752,7 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
 				u64 __user *uindices)
 {
 	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
-	int n2 = 0;
+	int n2 = 0, n3 = 0;
 
 	/* copy general csr regs */
 	for (int i = 0; i < n1; i++) {
@@ -738,7 +786,25 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
 		}
 	}
 
-	return n1 + n2;
+	/* copy Smstateen csr regs */
+	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
+		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
+
+		for (int i = 0; i < n3; i++) {
+			u64 size = IS_ENABLED(CONFIG_32BIT) ?
+				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+				  KVM_REG_RISCV_CSR_SMSTATEEN | i;
+
+			if (uindices) {
+				if (put_user(reg, uindices))
+					return -EFAULT;
+				uindices++;
+			}
+		}
+	}
+
+	return n1 + n2 + n3;
 }
 
 static inline unsigned long num_timer_regs(void)
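With the ONE_REG plumbing above, userspace can access the guest's sstateen0 through the new KVM_REG_RISCV_CSR_SMSTATEEN sub-type; as the code shows, KVM rejects the access with -EINVAL when the host lacks Smstateen. A minimal sketch with a hypothetical helper (assumes a 64-bit host, an open vCPU fd, and uapi headers carrying the macros added by this series):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* reg_num 0 selects sstateen0, the first (and only) member of
	 * struct kvm_riscv_smstateen_csr, indexed in unsigned-long units. */
	static int read_guest_sstateen0(int vcpu_fd, uint64_t *val)
	{
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			      KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | 0,
			.addr = (uint64_t)(unsigned long)val,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}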
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 9cd97091c7..a04ff98085 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -67,6 +67,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
 		.ext_ptr = &vcpu_sbi_ext_pmu,
 	},
 	{
+		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
+		.ext_ptr = &vcpu_sbi_ext_dbcn,
+	},
+	{
 		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
 		.ext_ptr = &vcpu_sbi_ext_experimental,
 	},
@@ -155,14 +159,8 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
 	if (!sext)
 		return -ENOENT;
 
-	/*
-	 * We can't set the extension status to available here, since it may
-	 * have a probe() function which needs to confirm availability first,
-	 * but it may be too early to call that here. We can set the status to
-	 * unavailable, though.
-	 */
-	if (!reg_val)
-		scontext->ext_status[sext->ext_idx] =
+	scontext->ext_status[sext->ext_idx] = (reg_val) ?
+			KVM_RISCV_SBI_EXT_AVAILABLE :
 			KVM_RISCV_SBI_EXT_UNAVAILABLE;
 
 	return 0;
@@ -188,16 +186,8 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
 	if (!sext)
 		return -ENOENT;
 
-	/*
-	 * If the extension status is still uninitialized, then we should probe
-	 * to determine if it's available, but it may be too early to do that
-	 * here. The best we can do is report that the extension has not been
-	 * disabled, i.e. we return 1 when the extension is available and also
-	 * when it only may be available.
-	 */
-	*reg_val = scontext->ext_status[sext->ext_idx] !=
-			KVM_RISCV_SBI_EXT_UNAVAILABLE;
-
+	*reg_val = scontext->ext_status[sext->ext_idx] ==
+			KVM_RISCV_SBI_EXT_AVAILABLE;
 	return 0;
 }
@@ -337,18 +327,8 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
 		    scontext->ext_status[entry->ext_idx] ==
 					KVM_RISCV_SBI_EXT_AVAILABLE)
 			return ext;
-		if (scontext->ext_status[entry->ext_idx] ==
-					KVM_RISCV_SBI_EXT_UNAVAILABLE)
-			return NULL;
-		if (ext->probe && !ext->probe(vcpu)) {
-			scontext->ext_status[entry->ext_idx] =
-					KVM_RISCV_SBI_EXT_UNAVAILABLE;
-			return NULL;
-		}
-
-		scontext->ext_status[entry->ext_idx] =
-			KVM_RISCV_SBI_EXT_AVAILABLE;
-		return ext;
+
+		return NULL;
 	}
 }
@@ -419,3 +399,26 @@ ecall_done:
 
 	return ret;
 }
+
+void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+	const struct kvm_riscv_sbi_extension_entry *entry;
+	const struct kvm_vcpu_sbi_extension *ext;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+		entry = &sbi_ext[i];
+		ext = entry->ext_ptr;
+
+		if (ext->probe && !ext->probe(vcpu)) {
+			scontext->ext_status[entry->ext_idx] =
+				KVM_RISCV_SBI_EXT_UNAVAILABLE;
+			continue;
+		}
+
+		scontext->ext_status[entry->ext_idx] = ext->default_unavail ?
+					KVM_RISCV_SBI_EXT_UNAVAILABLE :
+					KVM_RISCV_SBI_EXT_AVAILABLE;
+	}
+}
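Because probing now happens once in kvm_riscv_vcpu_sbi_init(), every entry in ext_status is a definite available/unavailable state before the first KVM_RUN, and the get/set single-extension registers above read and write that state directly. One consequence: an extension marked default_unavail, such as DBCN below, stays off until userspace opts in. A hedged sketch of the opt-in (hypothetical helper; assumes a 64-bit host and the uapi macros from this series):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Flip the per-vCPU SBI DBCN extension to "available" before
	 * the first KVM_RUN; writing 0 instead would disable it. */
	static int enable_sbi_dbcn(int vcpu_fd)
	{
		uint64_t enable = 1;
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
			      KVM_RISCV_SBI_EXT_DBCN,
			.addr = (uint64_t)(unsigned long)&enable,
		};

		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}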
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 7c4d5d38a3..23b57c931b 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -175,3 +175,35 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
 	.extid_end = SBI_EXT_SRST,
 	.handler = kvm_sbi_ext_srst_handler,
 };
+
+static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
+				    struct kvm_run *run,
+				    struct kvm_vcpu_sbi_return *retdata)
+{
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+	unsigned long funcid = cp->a6;
+
+	switch (funcid) {
+	case SBI_EXT_DBCN_CONSOLE_WRITE:
+	case SBI_EXT_DBCN_CONSOLE_READ:
+	case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
+		/*
+		 * The SBI debug console functions are unconditionally
+		 * forwarded to the userspace.
+		 */
+		kvm_riscv_vcpu_sbi_forward(vcpu, run);
+		retdata->uexit = true;
+		break;
+	default:
+		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+	}
+
+	return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
+	.extid_start = SBI_EXT_DBCN,
+	.extid_end = SBI_EXT_DBCN,
+	.default_unavail = true,
+	.handler = kvm_sbi_ext_dbcn_handler,
+};
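For context, this is what the new handler services from the guest side: the SBI Debug Console extension uses EID 0x4442434E ("DBCN"), and all three functions are forwarded to userspace as a KVM_EXIT_RISCV_SBI exit. A guest-side sketch of the write-byte call (FID value 2 assumed per the DBCN specification; error handling elided):

	/* Print one character through the hypervisor's debug console. */
	static inline long sbi_dbcn_write_byte(unsigned char ch)
	{
		register unsigned long a0 asm("a0") = ch;         /* byte to write */
		register unsigned long a1 asm("a1");              /* SBI return value */
		register unsigned long a6 asm("a6") = 2;          /* CONSOLE_WRITE_BYTE */
		register unsigned long a7 asm("a7") = 0x4442434E; /* "DBCN" EID */

		asm volatile("ecall"
			     : "+r"(a0), "=r"(a1)
			     : "r"(a6), "r"(a7)
			     : "memory");
		return a0; /* SBI error code, 0 on success */
	}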
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index b430cbb695..b339a2682f 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -11,7 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm_vcpu_vector.h>
 #include <asm/vector.h>