// SPDX-License-Identifier: GPL-2.0
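/*
 * Native instruction templates for pv_ops patching.  When running on
 * bare metal, the indirect pv_ops calls emitted at paravirt call sites
 * are replaced with the short native sequences defined below;
 * everything else falls back to paravirt_patch_default().
 */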
#include <linux/stringify.h>
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
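
/*
 * PSTART/PEND delimit one instruction template inside a patch_data_*
 * struct.  PATCH copies that template into the insn buffer via
 * paravirt_patch_insns(), and PATCH_CASE expands to the matching
 * "case PARAVIRT_PATCH(ops.m):" arm of the switch in native_patch().
 */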
#define PSTART(d, m)						\
	patch_data_##d.m

#define PEND(d, m)						\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)				\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)		\
	case PARAVIRT_PATCH(ops.m):				\
		return PATCH(data, ops##_##m, insn_buff, len)

#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	mov64[3];
};

static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
};
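
/*
 * Patch in "mov %rdi, %rax" for 64-bit pv ops that simply return
 * their argument unchanged.
 */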
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
	return PATCH(xxl, mov64, insn_buff, len);
}
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};

static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
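
/*
 * Called for each patchable pv_ops call site: emit the native template
 * when one exists (for the lock ops, only if the native implementation
 * is in use), otherwise defer to paravirt_patch_default().
 */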
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif

	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}