author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-07 13:11:40 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-07 13:11:40 +0000
commit    8b0a8165cdad0f4133837d753649ef4682e42c3b (patch)
tree      5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /arch/loongarch/kvm
parent    Releasing progress-linux version 6.8.12-1~progress7.99u1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/loongarch/kvm')
-rw-r--r--  arch/loongarch/kvm/Kconfig    2
-rw-r--r--  arch/loongarch/kvm/mmu.c      2
-rw-r--r--  arch/loongarch/kvm/switch.S  15
-rw-r--r--  arch/loongarch/kvm/timer.c   43
-rw-r--r--  arch/loongarch/kvm/vcpu.c    33
5 files changed, 46 insertions(+), 49 deletions(-)
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index 61f7e33b1f..c4ef2b4d97 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -20,7 +20,6 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on AS_HAS_LVZ_EXTENSION
- depends on HAVE_KVM
select HAVE_KVM_DIRTY_RING_ACQ_REL
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_COMMON
@@ -28,6 +27,7 @@ config KVM
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
select KVM_MMIO
+ select HAVE_KVM_READONLY_MEM
select KVM_XFER_TO_GUEST_WORK
help
Support hosting virtualized guest machines using
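
The newly selected HAVE_KVM_READONLY_MEM lets userspace register memory slots with the KVM_MEM_READONLY flag, so guest writes to such a slot exit to userspace instead of being applied. A minimal userspace sketch of registering one such slot follows; the vm_fd, slot number and guest physical base are assumptions, and the flag is only honoured when the architecture advertises KVM_CAP_READONLY_MEM.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int add_readonly_slot(int vm_fd)
{
        /* Back a 1 MiB guest region with anonymous host memory. */
        void *host = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (host == MAP_FAILED)
                return -1;

        struct kvm_userspace_memory_region region = {
                .slot            = 1,            /* assumed free slot number */
                .flags           = KVM_MEM_READONLY,
                .guest_phys_addr = 0x10000000,   /* assumed guest physical base */
                .memory_size     = 1 << 20,
                .userspace_addr  = (unsigned long)host,
        };

        /* Guest writes to this range now produce MMIO-style exits to userspace. */
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}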
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 50a6acd7ff..a556cff357 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -723,7 +723,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
/*
* Read each entry once. As above, a non-leaf entry can be promoted to
* a huge page _during_ this walk. Re-reading the entry could send the
- * walk into the weeds, e.g. p*d_large() returns false (sees the old
+ * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
* value) and then p*d_offset() walks into the target huge page instead
* of the old page table (sees the new value).
*/
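
The comment above describes a read-once discipline: the walker snapshots each page-table entry a single time and bases both the leaf test and the descent on that snapshot, so a concurrent promotion to a huge page cannot be observed halfway. A self-contained userspace sketch of the same pattern follows; the leaf bit and entry layout are hypothetical stand-ins, not LoongArch's real formats.

#include <stdatomic.h>
#include <stdio.h>

#define LEAF_BIT 0x1UL                  /* hypothetical "entry is a leaf" bit */

static _Atomic unsigned long entry;     /* stands in for one page-table entry */

static void walk_once(void)
{
        /* Read the entry exactly once; every later decision uses this snapshot,
         * never a fresh re-read that might see a concurrently promoted value. */
        unsigned long snap = atomic_load(&entry);

        if (snap & LEAF_BIT)
                puts("leaf: stop the walk at this level");
        else
                puts("table: descend using the same snapshot");
}

int main(void)
{
        atomic_store(&entry, LEAF_BIT);
        walk_once();
        return 0;
}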
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index ba976509bf..80e988985a 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -8,7 +8,7 @@
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
-#include <asm/stackframe.h>
+#include <asm/unwind_hints.h>
#define HGPR_OFFSET(x) (PT_R0 + 8*x)
#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
@@ -112,6 +112,7 @@
.text
.cfi_sections .debug_frame
SYM_CODE_START(kvm_exc_entry)
+ UNWIND_HINT_UNDEFINED
csrwr a2, KVM_TEMP_KS
csrrd a2, KVM_VCPU_KS
addi.d a2, a2, KVM_VCPU_ARCH
@@ -213,12 +214,6 @@ SYM_FUNC_START(kvm_enter_guest)
/* Save host GPRs */
kvm_save_host_gpr a2
- /* Save host CRMD, PRMD to stack */
- csrrd a3, LOONGARCH_CSR_CRMD
- st.d a3, a2, PT_CRMD
- csrrd a3, LOONGARCH_CSR_PRMD
- st.d a3, a2, PT_PRMD
-
addi.d a2, a1, KVM_VCPU_ARCH
st.d sp, a2, KVM_ARCH_HSP
st.d tp, a2, KVM_ARCH_HTP
@@ -279,3 +274,9 @@ SYM_FUNC_END(kvm_restore_lasx)
.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
+
+#ifdef CONFIG_CPU_HAS_LBT
+STACK_FRAME_NON_STANDARD kvm_restore_fpu
+STACK_FRAME_NON_STANDARD kvm_restore_lsx
+STACK_FRAME_NON_STANDARD kvm_restore_lasx
+#endif
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index 111328f608..bcc6b6d063 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
-/*
- * Push timer forward on timeout.
- * Handle an hrtimer event by push the hrtimer forward a period.
- */
-static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
-{
- unsigned long cfg, period;
-
- /* Add periodic tick to current expire time */
- cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
- if (cfg & CSR_TCFG_PERIOD) {
- period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
- hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
- return HRTIMER_RESTART;
- } else
- return HRTIMER_NORESTART;
-}
-
/* Low level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
@@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
kvm_queue_irq(vcpu, INT_TI);
rcuwait_wake_up(&vcpu->wait);
- return kvm_count_timeout(vcpu);
+ return HRTIMER_NORESTART;
}
/*
@@ -93,7 +75,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
/*
* Freeze the soft-timer and sync the guest stable timer with it.
*/
- hrtimer_cancel(&vcpu->arch.swtimer);
+ if (kvm_vcpu_is_blocking(vcpu))
+ hrtimer_cancel(&vcpu->arch.swtimer);
/*
* From LoongArch Reference Manual Volume 1 Chapter 7.6.2
@@ -168,26 +151,20 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
* Here judge one-shot timer fired by checking whether TVAL is larger
* than TCFG
*/
- if (ticks < cfg) {
+ if (ticks < cfg)
delta = tick_to_ns(vcpu, ticks);
- expire = ktime_add_ns(ktime_get(), delta);
- vcpu->arch.expire = expire;
+ else
+ delta = 0;
+
+ expire = ktime_add_ns(ktime_get(), delta);
+ vcpu->arch.expire = expire;
+ if (kvm_vcpu_is_blocking(vcpu)) {
/*
* HRTIMER_MODE_PINNED is suggested since vcpu may run in
* the same physical cpu in next time
*/
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
- } else if (vcpu->stat.generic.blocking) {
- /*
- * Inject timer interrupt so that halt polling can dectect and exit.
- * VCPU is scheduled out already and sleeps in rcuwait queue and
- * will not poll pending events again. kvm_queue_irq() is not enough,
- * hrtimer swtimer should be used here.
- */
- expire = ktime_add_ns(ktime_get(), 10);
- vcpu->arch.expire = expire;
- hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
}
}
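
After this hunk the software hrtimer is armed only while the vCPU is blocking, and an already-fired one-shot timer simply collapses to an immediate expiry (delta = 0) instead of being special-cased. A standalone sketch of that expiry arithmetic follows; the 100 MHz counter frequency and the sample TCFG/TVAL values are assumptions, and tick_to_ns() here is only a stand-in for the kernel helper.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for tick_to_ns() assuming a 100 MHz stable counter: 10 ns per tick. */
static uint64_t tick_to_ns(uint64_t tick)
{
        return tick * 1000 / 100;
}

int main(void)
{
        uint64_t cfg   = 1000000;    /* programmed TCFG value (assumed)          */
        uint64_t ticks = 250000;     /* remaining TVAL ticks read back (assumed) */

        /* TVAL >= TCFG means the one-shot timer already fired: expire "now". */
        uint64_t delta = (ticks < cfg) ? tick_to_ns(ticks) : 0;

        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        uint64_t expire = (uint64_t)now.tv_sec * 1000000000ULL + now.tv_nsec + delta;

        printf("delta = %llu ns, absolute expiry = %llu ns\n",
               (unsigned long long)delta, (unsigned long long)expire);
        return 0;
}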
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 36106922b5..3a8779065f 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -304,11 +304,18 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
return -EINVAL;
switch (id) {
- case 2:
+ case LOONGARCH_CPUCFG0:
+ *v = GENMASK(31, 0);
+ return 0;
+ case LOONGARCH_CPUCFG1:
+ /* CPUCFG1_MSGINT is not supported by KVM */
+ *v = GENMASK(25, 0);
+ return 0;
+ case LOONGARCH_CPUCFG2:
/* CPUCFG2 features unconditionally supported by KVM */
*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
- CPUCFG2_LAM;
+ CPUCFG2_LSPW | CPUCFG2_LAM;
/*
* For the ISA extensions listed below, if one is supported
* by the host, then it is also supported by KVM.
@@ -319,13 +326,25 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v |= CPUCFG2_LASX;
return 0;
+ case LOONGARCH_CPUCFG3:
+ *v = GENMASK(16, 0);
+ return 0;
+ case LOONGARCH_CPUCFG4:
+ case LOONGARCH_CPUCFG5:
+ *v = GENMASK(31, 0);
+ return 0;
+ case LOONGARCH_CPUCFG16:
+ *v = GENMASK(16, 0);
+ return 0;
+ case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
+ *v = GENMASK(30, 0);
+ return 0;
default:
/*
- * No restrictions on other valid CPUCFG IDs' values, but
- * CPUCFG data is limited to 32 bits as the LoongArch ISA
- * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
+ * CPUCFG bits should be zero if reserved by HW or not
+ * supported by KVM.
*/
- *v = U32_MAX;
+ *v = 0;
return 0;
}
}
@@ -344,7 +363,7 @@ static int kvm_check_cpucfg(int id, u64 val)
return -EINVAL;
switch (id) {
- case 2:
+ case LOONGARCH_CPUCFG2:
if (!(val & CPUCFG2_LLFTP))
/* Guests must have a constant timer */
return -EINVAL;
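
The per-word masks returned by _kvm_get_cpucfg_mask() feed the validation in kvm_check_cpucfg(): a userspace-supplied CPUCFG value that sets any bit outside the mask is rejected. A self-contained sketch of that mask test follows; GENMASK_ULL() is re-derived for userspace and the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

/* Accept a CPUCFG write only if it sets no bits outside the per-word mask. */
static int check_cpucfg(uint64_t val, uint64_t mask)
{
        return (val & ~mask) ? -1 : 0;
}

int main(void)
{
        uint64_t cpucfg1_mask = GENMASK_ULL(25, 0);     /* CPUCFG1 mask from the hunk above */

        printf("%d\n", check_cpucfg(0x03ffffffULL, cpucfg1_mask));  /* 0: every bit allowed     */
        printf("%d\n", check_cpucfg(1ULL << 26, cpucfg1_mask));     /* -1: bit outside the mask */
        return 0;
}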