Diffstat (limited to 'arch/loongarch/include/asm')
-rw-r--r--  arch/loongarch/include/asm/acpi.h          4
-rw-r--r--  arch/loongarch/include/asm/atomic.h       88
-rw-r--r--  arch/loongarch/include/asm/inst.h         29
-rw-r--r--  arch/loongarch/include/asm/jump_label.h    4
-rw-r--r--  arch/loongarch/include/asm/kvm_csr.h     211
-rw-r--r--  arch/loongarch/include/asm/kvm_host.h    237
-rw-r--r--  arch/loongarch/include/asm/kvm_mmu.h     139
-rw-r--r--  arch/loongarch/include/asm/kvm_types.h    11
-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h     93
-rw-r--r--  arch/loongarch/include/asm/local.h        27
-rw-r--r--  arch/loongarch/include/asm/loongarch.h    24
-rw-r--r--  arch/loongarch/include/asm/percpu.h       11
-rw-r--r--  arch/loongarch/include/asm/pgalloc.h       1
13 files changed, 831 insertions(+), 48 deletions(-)
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index 8de6c4b83a..49e29b2999 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
+#define MAX_CORE_PIC 256
+
extern struct list_head acpi_wakeup_device_list;
-extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
+extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
extern int __init parse_acpi_topology(void);
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index e27f0c72d3..99af8b3160 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -36,19 +36,19 @@
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__( \
- "am"#asm_op"_db.w" " $zero, %1, %0 \n" \
+ "am"#asm_op".w" " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
- "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+ "am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@@ -56,13 +56,13 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
return result c_op I; \
}
-#define ATOMIC_FETCH_OP(op, I, asm_op) \
-static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
- "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+ "am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@@ -72,29 +72,53 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
#define ATOMIC_OPS(op, I, asm_op, c_op) \
ATOMIC_OP(op, I, asm_op) \
- ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
- ATOMIC_FETCH_OP(op, I, asm_op)
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+ ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+ ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, I, asm_op) \
ATOMIC_OP(op, I, asm_op) \
- ATOMIC_FETCH_OP(op, I, asm_op)
+ ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
@@ -172,18 +196,18 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
__asm__ __volatile__( \
- "am"#asm_op"_db.d " " $zero, %1, %0 \n" \
+ "am"#asm_op".d " " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
{ \
long result; \
__asm__ __volatile__( \
- "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+ "am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@@ -191,13 +215,13 @@ static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
return result c_op I; \
}
-#define ATOMIC64_FETCH_OP(op, I, asm_op) \
-static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
{ \
long result; \
\
__asm__ __volatile__( \
- "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+ "am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@@ -207,29 +231,53 @@ static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
#define ATOMIC64_OPS(op, I, asm_op, c_op) \
ATOMIC64_OP(op, I, asm_op) \
- ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
- ATOMIC64_FETCH_OP(op, I, asm_op)
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+ ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, I, asm_op) \
ATOMIC64_OP(op, I, asm_op) \
- ATOMIC64_FETCH_OP(op, I, asm_op)
+ ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+ ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
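Note: the atomic.h rework above generates two flavours of every returning/fetch operation: a fully ordered one built from the "_db" (barrier) AMO mnemonics, and a "_relaxed" one built from the plain mnemonics; the acquire/release names are then aliased to the fully ordered variant. A minimal sketch of what the macros expand to for 32-bit add (illustrative only, not part of the patch):

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	int result;

	__asm__ __volatile__(
		"amadd_db.w %1, %2, %0 \n"	/* AMO with barrier hint */
		: "+ZB" (v->counter), "=&r" (result)
		: "r" (i)
		: "memory");

	return result + i;			/* old value + i */
}

static inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
{
	int result;

	__asm__ __volatile__(
		"amadd.w %1, %2, %0 \n"		/* plain AMO, no ordering */
		: "+ZB" (v->counter), "=&r" (result)
		: "r" (i)
		: "memory");

	return result + i;
}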
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 71e1ed4165..d8f637f9e4 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -65,6 +65,16 @@ enum reg2_op {
revbd_op = 0x0f,
revh2w_op = 0x10,
revhd_op = 0x11,
+ extwh_op = 0x16,
+ extwb_op = 0x17,
+ iocsrrdb_op = 0x19200,
+ iocsrrdh_op = 0x19201,
+ iocsrrdw_op = 0x19202,
+ iocsrrdd_op = 0x19203,
+ iocsrwrb_op = 0x19204,
+ iocsrwrh_op = 0x19205,
+ iocsrwrw_op = 0x19206,
+ iocsrwrd_op = 0x19207,
};
enum reg2i5_op {
@@ -318,6 +328,13 @@ struct reg2bstrd_format {
unsigned int opcode : 10;
};
+struct reg2csr_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int csr : 14;
+ unsigned int opcode : 8;
+};
+
struct reg3_format {
unsigned int rd : 5;
unsigned int rj : 5;
@@ -346,6 +363,7 @@ union loongarch_instruction {
struct reg2i14_format reg2i14_format;
struct reg2i16_format reg2i16_format;
struct reg2bstrd_format reg2bstrd_format;
+ struct reg2csr_format reg2csr_format;
struct reg3_format reg3_format;
struct reg3sa2_format reg3sa2_format;
};
@@ -556,6 +574,8 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op)
DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op)
DEF_EMIT_REG2_FORMAT(revbd, revbd_op)
+DEF_EMIT_REG2_FORMAT(extwh, extwh_op)
+DEF_EMIT_REG2_FORMAT(extwb, extwb_op)
#define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
@@ -607,6 +627,9 @@ DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op)
DEF_EMIT_REG2I12_FORMAT(andi, andi_op)
DEF_EMIT_REG2I12_FORMAT(ori, ori_op)
DEF_EMIT_REG2I12_FORMAT(xori, xori_op)
+DEF_EMIT_REG2I12_FORMAT(ldb, ldb_op)
+DEF_EMIT_REG2I12_FORMAT(ldh, ldh_op)
+DEF_EMIT_REG2I12_FORMAT(ldw, ldw_op)
DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op)
DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op)
DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op)
@@ -685,9 +708,12 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
insn->reg3_format.rk = rk; \
}
+DEF_EMIT_REG3_FORMAT(addw, addw_op)
DEF_EMIT_REG3_FORMAT(addd, addd_op)
DEF_EMIT_REG3_FORMAT(subd, subd_op)
DEF_EMIT_REG3_FORMAT(muld, muld_op)
+DEF_EMIT_REG3_FORMAT(divd, divd_op)
+DEF_EMIT_REG3_FORMAT(modd, modd_op)
DEF_EMIT_REG3_FORMAT(divdu, divdu_op)
DEF_EMIT_REG3_FORMAT(moddu, moddu_op)
DEF_EMIT_REG3_FORMAT(and, and_op)
@@ -699,6 +725,9 @@ DEF_EMIT_REG3_FORMAT(srlw, srlw_op)
DEF_EMIT_REG3_FORMAT(srld, srld_op)
DEF_EMIT_REG3_FORMAT(sraw, sraw_op)
DEF_EMIT_REG3_FORMAT(srad, srad_op)
+DEF_EMIT_REG3_FORMAT(ldxb, ldxb_op)
+DEF_EMIT_REG3_FORMAT(ldxh, ldxh_op)
+DEF_EMIT_REG3_FORMAT(ldxw, ldxw_op)
DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op)
DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op)
DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op)
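Note: the new reg2csr_format view above is meant for decoding trapped CSR accesses in KVM. A small sketch of pulling the fields out of a raw instruction encoding (illustrative only, not part of the patch; it assumes the union's existing raw "word" member, which is not shown in this hunk):

static void example_decode_csr_op(unsigned int word)
{
	union loongarch_instruction insn;

	insn.word = word;			/* raw 32-bit encoding */
	pr_debug("csr=%#x rj=%u rd=%u\n",
		 insn.reg2csr_format.csr,	/* bits [23:10]: CSR number */
		 insn.reg2csr_format.rj,	/* bits [9:5] */
		 insn.reg2csr_format.rd);	/* bits [4:0] */
}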
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 3cea299a5e..29acfe3de3 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -22,7 +22,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: nop \n\t"
JUMP_TABLE_ENTRY
: : "i"(&((char *)key)[branch]) : : l_yes);
@@ -35,7 +35,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: b %l[l_yes] \n\t"
JUMP_TABLE_ENTRY
: : "i"(&((char *)key)[branch]) : : l_yes);
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
new file mode 100644
index 0000000000..724ca8b7b4
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_CSR_H__
+#define __ASM_LOONGARCH_KVM_CSR_H__
+
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+#include <asm/loongarch.h>
+#include <asm/kvm_vcpu.h>
+
+#define gcsr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__( \
+ " gcsrrd %[val], %[reg]\n\t" \
+ : [val] "=r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+#define gcsr_write(v, csr) \
+({ \
+ register unsigned long __v = v; \
+ __asm__ __volatile__ ( \
+ " gcsrwr %[val], %[reg]\n\t" \
+ : [val] "+r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+})
+
+#define gcsr_xchg(v, m, csr) \
+({ \
+ register unsigned long __v = v; \
+ __asm__ __volatile__( \
+ " gcsrxchg %[val], %[mask], %[reg]\n\t" \
+ : [val] "+r" (__v) \
+ : [mask] "r" (m), [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+/* Guest CSRS read and write */
+#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD)
+#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD)
+#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD)
+#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD)
+#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN)
+#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN)
+#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC)
+#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC)
+#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG)
+#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG)
+#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT)
+#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT)
+#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA)
+#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA)
+#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV)
+#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV)
+#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI)
+#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI)
+#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY)
+#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY)
+
+#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID)
+#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID)
+#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL)
+#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL)
+#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH)
+#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH)
+#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD)
+#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD)
+#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0)
+#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0)
+#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1)
+#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1)
+#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE)
+#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE)
+#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG)
+#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG)
+
+#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID)
+#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID)
+#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1)
+#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1)
+#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2)
+#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2)
+#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3)
+#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3)
+
+#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0)
+#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0)
+#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1)
+#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1)
+#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2)
+#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2)
+#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3)
+#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3)
+#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4)
+#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4)
+#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5)
+#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5)
+#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6)
+#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6)
+#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7)
+#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7)
+
+#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID)
+#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID)
+#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG)
+#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG)
+#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL)
+#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL)
+#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC)
+#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC)
+
+#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL)
+#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL)
+
+#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX)
+#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX)
+#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY)
+#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY)
+#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV)
+#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV)
+#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA)
+#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA)
+#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE)
+#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE)
+#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0)
+#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0)
+#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1)
+#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1)
+#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI)
+#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI)
+#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD)
+#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD)
+
+#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0)
+#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0)
+#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1)
+#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1)
+#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2)
+#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2)
+#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3)
+#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3)
+
+/* Guest related CSRs */
+#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC)
+#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC)
+#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP)
+#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG)
+#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG)
+#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT)
+#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT)
+#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC)
+#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC)
+#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC)
+#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC)
+
+#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name)
+
+__BUILD_CSR_OP(gcfg)
+__BUILD_CSR_OP(gstat)
+__BUILD_CSR_OP(gtlbc)
+__BUILD_CSR_OP(gintc)
+__BUILD_GCSR_OP(llbctl)
+__BUILD_GCSR_OP(tlbidx)
+
+#define set_gcsr_estat(val) \
+ gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT)
+#define clear_gcsr_estat(val) \
+ gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT)
+
+#define kvm_read_hw_gcsr(id) gcsr_read(id)
+#define kvm_write_hw_gcsr(id, val) gcsr_write(val, id)
+
+#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
+#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+
+int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ return csr->csrs[gid];
+}
+
+static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val)
+{
+ csr->csrs[gid] = val;
+}
+
+static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long val)
+{
+ csr->csrs[gid] |= val;
+}
+
+static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long mask, unsigned long val)
+{
+ unsigned long _mask = mask;
+
+ csr->csrs[gid] &= ~_mask;
+ csr->csrs[gid] |= val & _mask;
+}
+
+#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
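Note: a short usage sketch for the guest-CSR accessors defined above (illustrative only, not part of the patch; CPU_TIMER is the timer interrupt-pending bit from the new asm/kvm_vcpu.h):

static void example_inject_guest_timer(struct loongarch_csrs *csr)
{
	/* Snapshot the hardware guest ESTAT into the software image. */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);

	/* gcsr_xchg(val, mask, csr) writes (val & mask) into the CSR,
	 * so this sets the timer interrupt-pending bit in hardware. */
	set_gcsr_estat(CPU_TIMER);

	/* The software copy stays readable without another gcsrrd. */
	pr_debug("estat=%lx\n", kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT));
}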
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
new file mode 100644
index 0000000000..11328700d4
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_HOST_H__
+#define __ASM_LOONGARCH_KVM_HOST_H__
+
+#include <linux/cpumask.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/inst.h>
+#include <asm/kvm_mmu.h>
+#include <asm/loongarch.h>
+
+/* Loongarch KVM register ids */
+#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+
+#define KVM_MAX_VCPUS 256
+#define KVM_MAX_CPUCFG_REGS 21
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 0
+
+#define KVM_HALT_POLL_NS_DEFAULT 500000
+
+struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
+ u64 pages;
+ u64 hugepages;
+};
+
+struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
+ u64 int_exits;
+ u64 idle_exits;
+ u64 cpucfg_exits;
+ u64 signal_exits;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_context {
+ unsigned long vpid_cache;
+ struct kvm_vcpu *last_vcpu;
+};
+
+struct kvm_world_switch {
+ int (*exc_entry)(void);
+ int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ unsigned long page_order;
+};
+
+#define MAX_PGTABLE_LEVELS 4
+
+struct kvm_arch {
+ /* Guest physical mm */
+ kvm_pte_t *pgd;
+ unsigned long gpa_size;
+ unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
+ unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
+ unsigned int root_level;
+
+ s64 time_offset;
+ struct kvm_context __percpu *vmcs;
+};
+
+#define CSR_MAX_NUMS 0x800
+
+struct loongarch_csrs {
+ unsigned long csrs[CSR_MAX_NUMS];
+};
+
+/* Resume Flags */
+#define RESUME_HOST 0
+#define RESUME_GUEST 1
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_DO_IOCSR, /* handle IOCSR request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_EXCEPT, /* A guest exception has been generated */
+};
+
+#define KVM_LARCH_FPU (0x1 << 0)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
+
+struct kvm_vcpu_arch {
+ /*
+ * Switch pointer-to-function type to unsigned long
+ * for loading the value into register directly.
+ */
+ unsigned long host_eentry;
+ unsigned long guest_eentry;
+
+ /* Pointers stored here for easy accessing from assembly code */
+ int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+ /* Host registers preserved across guest mode execution */
+ unsigned long host_sp;
+ unsigned long host_tp;
+ unsigned long host_pgd;
+
+ /* Host CSRs are used when handling exits from guest */
+ unsigned long badi;
+ unsigned long badv;
+ unsigned long host_ecfg;
+ unsigned long host_estat;
+ unsigned long host_percpu;
+
+ /* GPRs */
+ unsigned long gprs[32];
+ unsigned long pc;
+
+ /* Which auxiliary state is loaded (KVM_LARCH_*) */
+ unsigned int aux_inuse;
+
+ /* FPU state */
+ struct loongarch_fpu fpu FPU_ALIGN;
+
+ /* CSR state */
+ struct loongarch_csrs *csr;
+
+ /* GPR used as IO source/target */
+ u32 io_gpr;
+
+ /* KVM register to control count timer */
+ u32 count_ctl;
+ struct hrtimer swtimer;
+
+ /* Bitmask of intr that are pending */
+ unsigned long irq_pending;
+ /* Bitmask of pending intr to be cleared */
+ unsigned long irq_clear;
+
+ /* Bitmask of exceptions that are pending */
+ unsigned long exception_pending;
+ unsigned int esubcode;
+
+ /* Cache for pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* vcpu's vpid */
+ u64 vpid;
+
+ /* Frequency of stable timer in Hz */
+ u64 timer_mhz;
+ ktime_t expire;
+
+ /* Last CPU the vCPU state was loaded on */
+ int last_sched_cpu;
+ /* mp state */
+ struct kvm_mp_state mp_state;
+ /* cpucfg */
+ u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+};
+
+static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
+{
+ return csr->csrs[reg];
+}
+
+static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
+{
+ csr->csrs[reg] = val;
+}
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* MMU handling */
+void kvm_flush_tlb_all(void);
+void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+
+static inline void update_pc(struct kvm_vcpu_arch *arch)
+{
+ arch->pc += 4;
+}
+
+/*
+ * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
+ * @vcpu: Virtual CPU.
+ *
+ * Returns: Whether the TLBL exception was likely due to an instruction
+ * fetch fault rather than a data load fault.
+ */
+static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
+{
+ return arch->pc == arch->badv;
+}
+
+/* Misc */
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
+void kvm_check_vpid(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
+void kvm_init_vmcs(struct kvm *kvm);
+void kvm_exc_entry(void);
+int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+extern unsigned long vpid_mask;
+extern const unsigned long kvm_exception_size;
+extern const unsigned long kvm_enter_guest_size;
+extern struct kvm_world_switch *kvm_loongarch_ops;
+
+#define SW_GCSR (1 << 0)
+#define HW_GCSR (1 << 1)
+#define INVALID_GCSR (1 << 2)
+
+int get_gcsr_flag(int csr);
+void set_hw_gcsr(int csr_id, unsigned long val);
+
+#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
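Note: a sketch of the shape an exit handler might take using the new kvm_vcpu_arch fields and helpers (illustrative only, not part of the patch; the PRCFG1 read into $a0 is just an example of emulation):

static int example_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	/* badv == pc means the TLBL fault came from an instruction fetch. */
	if (kvm_is_ifetch_fault(arch))
		return RESUME_HOST;

	/* Emulate, e.g. by consulting the software CSR image ... */
	arch->gprs[4] = readl_sw_gcsr(arch->csr, LOONGARCH_CSR_PRCFG1);

	/* ... then step past the trapping instruction and resume. */
	update_pc(arch);
	return RESUME_GUEST;
}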
diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h
new file mode 100644
index 0000000000..099bafc6f7
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_mmu.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_MMU_H__
+#define __ASM_LOONGARCH_KVM_MMU_H__
+
+#include <linux/kvm_host.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
+ * for which pages need to be cached.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)
+
+#define _KVM_FLUSH_PGTABLE 0x1
+#define _KVM_HAS_PGMASK 0x2
+#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
+#define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT))
+
+typedef unsigned long kvm_pte_t;
+typedef struct kvm_ptw_ctx kvm_ptw_ctx;
+typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);
+
+struct kvm_ptw_ctx {
+ kvm_pte_ops ops;
+ unsigned long flag;
+
+ /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */
+ unsigned long mask;
+ unsigned long gfn;
+
+ /* page walk mmu info */
+ unsigned int level;
+ unsigned long pgtable_shift;
+ unsigned long invalid_entry;
+ unsigned long *invalid_ptes;
+ unsigned int *pte_shifts;
+ void *opaque;
+
+ /* free pte table page list */
+ struct list_head list;
+};
+
+kvm_pte_t *kvm_pgd_alloc(void);
+
+static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
+{
+ WRITE_ONCE(*ptep, val);
+}
+
+static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
+static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
+static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
+static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
+
+static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
+{
+ return pte | _PAGE_ACCESSED;
+}
+
+static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
+{
+ return pte & ~_PAGE_ACCESSED;
+}
+
+static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
+{
+ return pte | _PAGE_DIRTY;
+}
+
+static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
+{
+ return pte & ~_PAGE_DIRTY;
+}
+
+static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
+{
+ return pte | _PAGE_HUGE;
+}
+
+static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
+{
+ return pte & ~_PAGE_HUGE;
+}
+
+static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
+{
+ return ctx->flag & _KVM_FLUSH_PGTABLE;
+}
+
+static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
+ phys_addr_t addr)
+{
+
+ return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
+}
+
+static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx,
+ phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t boundary, size;
+
+ size = 0x1UL << ctx->pgtable_shift;
+ boundary = (addr + size) & ~(size - 1);
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
+{
+ if (!ctx || ctx->level == 0)
+ return !!(*entry & _PAGE_PRESENT);
+
+ return *entry != ctx->invalid_entry;
+}
+
+static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
+{
+ return *entry == ctx->invalid_entry;
+}
+
+static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx)
+{
+ ctx->level--;
+ ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
+ ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
+}
+
+static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx)
+{
+ ctx->level++;
+ ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
+ ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
+}
+
+#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */
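Note: a sketch of one recursion level of the GPA page walker that these helpers are built for (illustrative only, not part of the patch; it assumes a non-leaf entry stores the kernel virtual address of the child table):

static int example_ptw_level(kvm_pte_t *dir, phys_addr_t addr,
			     phys_addr_t end, kvm_ptw_ctx *ctx)
{
	phys_addr_t next;
	kvm_pte_t *entry;
	int ret = 0;

	do {
		entry = kvm_pgtable_offset(ctx, dir, addr);
		next = kvm_pgtable_addr_end(ctx, addr, end);

		if (!kvm_pte_present(ctx, entry))
			continue;

		if (ctx->level == 0 || kvm_pte_huge(*entry)) {
			ret |= ctx->ops(entry, addr, ctx);	/* leaf: apply op */
			continue;
		}

		kvm_ptw_enter(ctx);				/* descend one level */
		ret |= example_ptw_level((kvm_pte_t *)*entry, addr, next, ctx);
		kvm_ptw_exit(ctx);
	} while (addr = next, addr != end);

	return ret;
}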
diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
new file mode 100644
index 0000000000..2fe1d4bdff
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_types.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_KVM_TYPES_H
+#define _ASM_LOONGARCH_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
+#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
new file mode 100644
index 0000000000..553cfa2b2b
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
+#define __ASM_LOONGARCH_KVM_VCPU_H__
+
+#include <linux/kvm_host.h>
+#include <asm/loongarch.h>
+
+/* Controlled by 0x5 guest estat */
+#define CPU_SIP0 (_ULCAST_(1))
+#define CPU_SIP1 (_ULCAST_(1) << 1)
+#define CPU_PMU (_ULCAST_(1) << 10)
+#define CPU_TIMER (_ULCAST_(1) << 11)
+#define CPU_IPI (_ULCAST_(1) << 12)
+
+/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */
+#define CPU_IP0 (_ULCAST_(1))
+#define CPU_IP1 (_ULCAST_(1) << 1)
+#define CPU_IP2 (_ULCAST_(1) << 2)
+#define CPU_IP3 (_ULCAST_(1) << 3)
+#define CPU_IP4 (_ULCAST_(1) << 4)
+#define CPU_IP5 (_ULCAST_(1) << 5)
+#define CPU_IP6 (_ULCAST_(1) << 6)
+#define CPU_IP7 (_ULCAST_(1) << 7)
+
+#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24
+#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff
+#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16
+#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff
+#define KVM_LOONGSON_IRQ_NUM_SHIFT 0
+#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
+
+typedef union loongarch_instruction larch_inst;
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+
+int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
+int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
+int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_emu_idle(struct kvm_vcpu *vcpu);
+int kvm_pending_timer(struct kvm_vcpu *vcpu);
+int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
+void kvm_deliver_intr(struct kvm_vcpu *vcpu);
+void kvm_deliver_exception(struct kvm_vcpu *vcpu);
+
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+void kvm_save_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fcsr(struct loongarch_fpu *fpu);
+
+void kvm_acquire_timer(struct kvm_vcpu *vcpu);
+void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
+void kvm_reset_timer(struct kvm_vcpu *vcpu);
+void kvm_save_timer(struct kvm_vcpu *vcpu);
+void kvm_restore_timer(struct kvm_vcpu *vcpu);
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+
+/*
+ * Loongarch KVM guest interrupt handling
+ */
+static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ set_bit(irq, &vcpu->arch.irq_pending);
+ clear_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ clear_bit(irq, &vcpu->arch.irq_pending);
+ set_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
+ unsigned int code, unsigned int subcode)
+{
+ /* only one exception can be injected */
+ if (!vcpu->arch.exception_pending) {
+ set_bit(code, &vcpu->arch.exception_pending);
+ vcpu->arch.esubcode = subcode;
+ return 0;
+ } else
+ return -1;
+}
+
+#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
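Note: a usage sketch for the interrupt/exception queueing helpers above (illustrative only, not part of the patch; INT_TI and EXCCODE_INE are existing constants from asm/loongarch.h):

static void example_queue_events(struct kvm_vcpu *vcpu)
{
	/* Mark the guest timer interrupt pending; it is actually
	 * delivered later by kvm_deliver_intr(). */
	kvm_queue_irq(vcpu, INT_TI);

	/* Only one exception may be pending at a time. */
	if (kvm_queue_exception(vcpu, EXCCODE_INE, 0))
		pr_debug("exception already pending\n");
}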
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
index c49675852b..f53ea653af 100644
--- a/arch/loongarch/include/asm/local.h
+++ b/arch/loongarch/include/asm/local.h
@@ -70,22 +70,27 @@ static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
/**
- * local_add_unless - add unless the number is a given value
+ * local_add_unless - add unless the number is already a given value
* @l: pointer of type local_t
* @a: the amount to add to l...
* @u: ...unless l is equal to u.
*
- * Atomically adds @a to @l, so long as it was not @u.
- * Returns non-zero if @l was not @u, and zero otherwise.
+ * Atomically adds @a to @l, if @v was not already @u.
+ * Returns true if the addition was done.
*/
-#define local_add_unless(l, a, u) \
-({ \
- long c, old; \
- c = local_read(l); \
- while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static inline bool
+local_add_unless(local_t *l, long a, long u)
+{
+ long c = local_read(l);
+
+ do {
+ if (unlikely(c == u))
+ return false;
+ } while (!local_try_cmpxchg(l, &c, c + a));
+
+ return true;
+}
+
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
#define local_dec_return(l) local_sub_return(1, (l))
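Note: the open-coded cmpxchg loop above becomes a typed inline built on local_try_cmpxchg(). A usage sketch (illustrative only; "refs" is a hypothetical counter):

static bool example_get_ref(local_t *refs)
{
	/* Take a reference only if the count has not already hit zero;
	 * same contract as local_inc_not_zero(refs). */
	return local_add_unless(refs, 1, 0);
}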
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 33531d432b..46366e783c 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -226,6 +226,7 @@
#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
#define CSR_ECFG_VS_SHIFT 16
#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
#define CSR_ECFG_IM_SHIFT 0
#define CSR_ECFG_IM_WIDTH 14
@@ -314,13 +315,14 @@
#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
-#define CSR_GTLBC_RID_SHIFT 16
-#define CSR_GTLBC_RID_WIDTH 8
-#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TGID_SHIFT 16
+#define CSR_GTLBC_TGID_WIDTH 8
+#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
+#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
#define CSR_GTLBC_TOTI_SHIFT 13
#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
-#define CSR_GTLBC_USERID_SHIFT 12
-#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_USETGID_SHIFT 12
+#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
#define CSR_GTLBC_GMTLBSZ_SHIFT 0
#define CSR_GTLBC_GMTLBSZ_WIDTH 6
#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
@@ -475,6 +477,7 @@
#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
#define CSR_GSTAT_GID_SHIFT 16
#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
#define CSR_GSTAT_GIDBIT_SHIFT 4
#define CSR_GSTAT_GIDBIT_WIDTH 6
@@ -525,6 +528,12 @@
#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
+#define CSR_GCFG_MATP_NEST_SHIFT 2
+#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT)
+#define CSR_GCFG_MATP_ROOT_SHIFT 1
+#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT)
+#define CSR_GCFG_MATP_GUEST_SHIFT 0
+#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT)
#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
#define CSR_GINTC_HC_SHIFT 16
@@ -1089,12 +1098,11 @@
static __always_inline u64 drdtime(void)
{
- int rID = 0;
u64 val = 0;
__asm__ __volatile__(
- "rdtime.d %0, %1 \n\t"
- : "=r"(val), "=r"(rID)
+ "rdtime.d %0, $zero\n\t"
+ : "=r"(val)
:
);
return val;
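Note: the renamed GTLBC fields and the new *_SHIFT_END macros above appear intended for bstrins/bstrpick-style field inserts in the KVM world-switch assembly; from C the usual mask/shift form still applies. A small sketch composing the guest ID field (illustrative only, not part of the patch):

static inline u64 example_set_gid(u64 gstat, u64 gid)
{
	gstat &= ~CSR_GSTAT_GID;
	gstat |= (gid << CSR_GSTAT_GID_SHIFT) & CSR_GSTAT_GID;
	return gstat;
}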
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index ed5da02b1c..9b36ac003f 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -40,13 +40,13 @@ static __always_inline unsigned long __percpu_##op(void *ptr, \
switch (size) { \
case 4: \
__asm__ __volatile__( \
- "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
+ "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
: [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
: [val] "r" (val)); \
break; \
case 8: \
__asm__ __volatile__( \
- "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
+ "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
: [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
: [val] "r" (val)); \
break; \
@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
{
unsigned long ret;
@@ -100,7 +100,7 @@ static __always_inline unsigned long __percpu_read(void *ptr, int size)
return ret;
}
-static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
@@ -132,8 +132,7 @@ static __always_inline void __percpu_write(void *ptr, unsigned long val, int siz
}
}
-static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
- int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 79470f0b4f..4e2d6b7ca2 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -84,6 +84,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
if (!ptdesc)
return NULL;
+ pagetable_pud_ctor(ptdesc);
pud = ptdesc_address(ptdesc);
pud_init(pud);