/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>
#include <linux/sev-guest.h>

#include <asm/insn.h>
#include <asm/sev-common.h>
#include <asm/bootparam.h>

#define GHCB_PROTOCOL_MIN	1ULL
#define GHCB_PROTOCOL_MAX	2ULL
#define GHCB_DEFAULT_USAGE	0ULL

#define VMGEXIT()		{ asm volatile("rep; vmmcall\n\r"); }
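/*
 * VMGEXIT is the "rep; vmmcall" encoding. A rough, illustrative sketch of a
 * GHCB-based exit (the ghcb_set_* accessors come from <asm/svm.h>; this flow
 * is not defined by this header):
 *
 *	ghcb_set_sw_exit_code(ghcb, exit_code);
 *	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
 *	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
 *	VMGEXIT();	// GHCB GPA must already be in the GHCB MSR
 *	// on return, the hypervisor's response is read back from the GHCB
 */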
enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};

struct es_fault_info {
	unsigned long vector;
	unsigned long error_code;
	unsigned long cr2;
};

struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;
	struct insn insn;
	struct es_fault_info fi;
};
/*
 * AMD SEV Confidential computing blob structure. The structure is
 * defined in OVMF UEFI firmware header:
 * https://github.com/tianocore/edk2/blob/master/OvmfPkg/Include/Guid/ConfidentialComputingSevSnpBlob.h
 */
#define CC_BLOB_SEV_HDR_MAGIC	0x45444d41	/* "AMDE" in ASCII */

struct cc_blob_sev_info {
	u32 magic;
	u16 version;
	u16 reserved;
	u64 secrets_phys;
	u32 secrets_len;
	u32 rsvd1;
	u64 cpuid_phys;
	u32 cpuid_len;
	u32 rsvd2;
} __packed;
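/*
 * Presumably located early in boot (e.g. via an EFI configuration table
 * entry or a setup_data link, see snp_init() below); secrets_phys/secrets_len
 * and cpuid_phys/cpuid_len then describe the SNP secrets and CPUID pages.
 */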
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask = (1ULL << bits) - 1;

	return (val & mask);
}
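/*
 * Illustrative example: lower_bits(regs->ax, 32) yields the low 32 bits of
 * RAX, e.g. when copying register state into the GHCB. Callers are expected
 * to pass bits < 64, since a full-width shift would be undefined.
 */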
struct real_mode_header;
enum stack_type;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

/* PVALIDATE return code: software defined (when rFlags.CF = 1) */
#define PVALIDATE_FAIL_NOUPDATE		255

/* RMP page size */
#define RMP_PG_SIZE_4K			0

#define RMPADJUST_VMSA_PAGE_BIT		BIT(16)

/* SNP Guest message request */
struct snp_req_data {
	unsigned long req_gpa;
	unsigned long resp_gpa;
	unsigned long data_gpa;
	unsigned int data_npages;
};
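/*
 * All fields are guest physical addresses: req_gpa/resp_gpa point at the
 * encrypted request/response messages, and data_gpa/data_npages describe an
 * optional shared buffer (e.g. certificate data for an extended report).
 * Consumed by snp_issue_guest_request() below.
 */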
struct sev_guest_platform_data {
	u64 secrets_gpa;
};

/*
 * The secrets page contains 96 bytes of reserved space that can be used by
 * the guest OS. The guest OS uses the area to save the message sequence
 * number for each VMPCK.
 *
 * See the GHCB spec section "Secrets Page Layout" for the format of this area.
 */
struct secrets_os_area {
	u32 msg_seqno_0;
	u32 msg_seqno_1;
	u32 msg_seqno_2;
	u32 msg_seqno_3;
	u64 ap_jump_table_pa;
	u8 rsvd[40];
	u8 guest_usage[32];
} __packed;
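/*
 * msg_seqno_N presumably tracks the sequence number of messages protected
 * with the matching vmpckN key in struct snp_secrets_page_layout below;
 * ap_jump_table_pa is guest-owned storage that can hold the physical address
 * of the SEV-ES AP jump table.
 */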
#define VMPCK_KEY_LEN		32

/* See the SNP spec version 0.9 for secrets page format */
struct snp_secrets_page_layout {
	u32 version;
	u32 imien	: 1,
	    rsvd1	: 31;
	u32 fms;
	u32 rsvd2;
	u8 gosvw[16];
	u8 vmpck0[VMPCK_KEY_LEN];
	u8 vmpck1[VMPCK_KEY_LEN];
	u8 vmpck2[VMPCK_KEY_LEN];
	u8 vmpck3[VMPCK_KEY_LEN];
	struct secrets_os_area os_area;
	u8 rsvd3[3840];
} __packed;
#ifdef CONFIG_AMD_MEM_ENCRYPT
extern struct static_key_false sev_es_enable_key;
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
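/*
 * sev_es_ist_enter()/sev_es_ist_exit() adjust the #VC IST stack around
 * NMI-like entries so a nested #VC cannot overwrite an in-use IST frame.
 * The static key keeps both wrappers zero-cost when SEV-ES is not active.
 */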
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_enter(regs);
}

static __always_inline void sev_es_ist_exit(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_exit();
}
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);

static __always_inline void sev_es_nmi_complete(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_nmi_complete();
}

extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern void sev_enable(struct boot_params *bp);
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
	int rc;

	/* "rmpadjust" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF3,0x0F,0x01,0xFE\n\t"
		     : "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(attrs)
		     : "memory", "cc");

	return rc;
}
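/*
 * Illustrative sketch (not part of this header): the SNP AP-creation path
 * marks a new VMSA page by passing the target VMPL in the low attribute bits
 * together with RMPADJUST_VMSA_PAGE_BIT, roughly:
 *
 *	ret = rmpadjust(vmsa_vaddr, RMP_PG_SIZE_4K, 1 | RMPADJUST_VMSA_PAGE_BIT);
 *
 * (vmsa_vaddr is a placeholder.) A non-zero return value is the RMPADJUST
 * failure code.
 */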
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
{
	bool no_rmpupdate;
	int rc;

	/* "pvalidate" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (no_rmpupdate), "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(validate)
		     : "memory", "cc");

	if (no_rmpupdate)
		return PVALIDATE_FAIL_NOUPDATE;

	return rc;
}
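/*
 * Illustrative sketch: after a 4K page has been made private in the RMP, the
 * guest validates it with something like:
 *
 *	ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
 *
 * PVALIDATE_FAIL_NOUPDATE means the validated state was already as requested
 * (rFlags.CF was set); any other non-zero value is the hardware failure code.
 */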
struct snp_guest_request_ioctl;

void setup_ghcb(void);
void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
				  unsigned long npages);
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
				 unsigned long npages);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
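/*
 * Rough usage sketch (assumed caller, not defined here): the SNP guest
 * driver builds an encrypted message, fills struct snp_req_data with the
 * corresponding guest physical addresses and issues it with an exit code
 * such as SVM_VMGEXIT_GUEST_REQUEST from <asm/svm.h>:
 *
 *	ret = snp_issue_guest_request(SVM_VMGEXIT_GUEST_REQUEST, &input, &rio);
 *
 * On return, rio.exitinfo2 carries the firmware/hypervisor status.
 */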
#else	/* !CONFIG_AMD_MEM_ENCRYPT */
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
static inline void sev_enable(struct boot_params *bp) { }
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
static inline void setup_ghcb(void) { }
static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
static inline void snp_dmi_setup(void) { }
static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
	return -ENOTTY;
}
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

#endif	/* __ASM_ENCRYPTED_STATE_H */