1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
|
/* $Id: CPUMInternal.h $ */
/** @file
* CPUM - Internal header file.
*/
/*
* Copyright (C) 2006-2020 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef VMM_INCLUDED_SRC_include_CPUMInternal_h
#define VMM_INCLUDED_SRC_include_CPUMInternal_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif
#ifndef VBOX_FOR_DTRACE_LIB
# include <VBox/cdefs.h>
# include <VBox/types.h>
# include <VBox/vmm/stam.h>
# include <iprt/x86.h>
# include <VBox/vmm/pgm.h>
#else
# pragma D depends_on library x86.d
# pragma D depends_on library cpumctx.d
# pragma D depends_on library cpum.d
/* Some fudging. */
typedef uint64_t STAMCOUNTER;
#endif
/** @defgroup grp_cpum_int Internals
* @ingroup grp_cpum
* @internal
* @{
*/
/** Flags and types for CPUM fault handlers
* @{ */
/** Type: Load DS */
#define CPUM_HANDLER_DS 1
/** Type: Load ES */
#define CPUM_HANDLER_ES 2
/** Type: Load FS */
#define CPUM_HANDLER_FS 3
/** Type: Load GS */
#define CPUM_HANDLER_GS 4
/** Type: IRET */
#define CPUM_HANDLER_IRET 5
/** Type mask. */
#define CPUM_HANDLER_TYPEMASK 0xff
/** If set EBP points to the CPUMCTXCORE that's being used. */
#define CPUM_HANDLER_CTXCORE_IN_EBP RT_BIT(31)
/** @} */
/** Use flags (CPUM::fUseFlags).
* (Don't forget to sync this with CPUMInternal.mac !)
* @note Part of saved state.
* @{ */
/** Indicates that we've saved the host FPU, SSE, whatever state and that it
* needs to be restored. */
#define CPUM_USED_FPU_HOST RT_BIT(0)
/** Indicates that we've loaded the guest FPU, SSE, whatever state and that it
* needs to be saved. */
#define CPUM_USED_FPU_GUEST RT_BIT(10)
/** Used the guest FPU, SSE or such stuff since last we were in REM.
* REM syncing is clearing this, lazy FPU is setting it. */
#define CPUM_USED_FPU_SINCE_REM RT_BIT(1)
/** The XMM state was manually restored. (AMD only) */
#define CPUM_USED_MANUAL_XMM_RESTORE RT_BIT(2)
/** Host OS is using SYSENTER and we must NULL the CS. */
#define CPUM_USE_SYSENTER RT_BIT(3)
/** Host OS is using SYSENTER and we must NULL the CS. */
#define CPUM_USE_SYSCALL RT_BIT(4)
/** Debug registers are used by host and that DR7 and DR6 must be saved and
* disabled when switching to raw-mode. */
#define CPUM_USE_DEBUG_REGS_HOST RT_BIT(5)
/** Records that we've saved the host DRx registers.
* In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
* since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
#define CPUM_USED_DEBUG_REGS_HOST RT_BIT(6)
/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
* registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
#define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(7)
/** Used in ring-0 to indicate that we have loaded the hypervisor debug
* registers. */
#define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(8)
/** Used in ring-0 to indicate that we have loaded the guest debug
* registers (DR0-3 and maybe DR6) for direct use by the guest.
* DR7 (and AMD-V DR6) are handled via the VMCB. */
#define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(9)
/** Sync the FPU state on next entry (32->64 switcher only). */
#define CPUM_SYNC_FPU_STATE RT_BIT(16)
/** Sync the debug state on next entry (32->64 switcher only). */
#define CPUM_SYNC_DEBUG_REGS_GUEST RT_BIT(17)
/** Sync the debug state on next entry (32->64 switcher only).
* Almost the same as CPUM_USE_DEBUG_REGS_HYPER in the raw-mode switchers. */
#define CPUM_SYNC_DEBUG_REGS_HYPER RT_BIT(18)
/** Host CPU requires fxsave/fxrstor leaky bit handling. */
#define CPUM_USE_FFXSR_LEAKY RT_BIT(19)
/** Set if the VM supports long-mode. */
#define CPUM_USE_SUPPORTS_LONGMODE RT_BIT(20)
/** @} */
/** @name CPUM Saved State Version.
* @{ */
/** The current saved state version. */
#define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM
/** The saved state version including VMX hardware virtualization state (IEM only
* execution). */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM 19
/** The saved state version including SVM hardware virtualization state. */
#define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 18
/** The saved state version including XSAVE state. */
#define CPUM_SAVED_STATE_VERSION_XSAVE 17
/** The saved state version with good CPUID leaf count. */
#define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16
/** CPUID changes with explode forgetting to update the leaf count on
* restore, resulting in garbage being saved restoring+saving old states). */
#define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15
/** The saved state version before the CPUIDs changes. */
#define CPUM_SAVED_STATE_VERSION_PUT_STRUCT 14
/** The saved state version before using SSMR3PutStruct. */
#define CPUM_SAVED_STATE_VERSION_MEM 13
/** The saved state version before introducing the MSR size field. */
#define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE 12
/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
* selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
#define CPUM_SAVED_STATE_VERSION_VER3_2 11
/** The saved state version of 3.0 and 3.1 trunk before the teleportation
* changes. */
#define CPUM_SAVED_STATE_VERSION_VER3_0 10
/** The saved state version for the 2.1 trunk before the MSR changes. */
#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9
/** The saved state version of 2.0, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER2_0 8
/** The saved state version of 1.6, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER1_6 6
/** @} */
/**
* CPU info
*/
typedef struct CPUMINFO
{
/** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
uint32_t cMsrRanges;
/** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
* instruction. Older hardware has been observed to ignore higher bits. */
uint32_t fMsrMask;
/** MXCSR mask. */
uint32_t fMxCsrMask;
/** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
uint32_t cCpuIdLeaves;
/** The index of the first extended CPUID leaf in the array.
* Set to cCpuIdLeaves if none present. */
uint32_t iFirstExtCpuIdLeaf;
/** How to handle unknown CPUID leaves. */
CPUMUNKNOWNCPUID enmUnknownCpuIdMethod;
/** For use with CPUMUNKNOWNCPUID_DEFAULTS (DB & VM),
* CPUMUNKNOWNCPUID_LAST_STD_LEAF (VM) and CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX (VM). */
CPUMCPUID DefCpuId;
/** Scalable bus frequency used for reporting other frequencies. */
uint64_t uScalableBusFreq;
/** Pointer to the MSR ranges (ring-0 pointer). */
R0PTRTYPE(PCPUMMSRRANGE) paMsrRangesR0;
/** Pointer to the CPUID leaves (ring-0 pointer). */
R0PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR0;
/** Pointer to the MSR ranges (ring-3 pointer). */
R3PTRTYPE(PCPUMMSRRANGE) paMsrRangesR3;
/** Pointer to the CPUID leaves (ring-3 pointer). */
R3PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR3;
} CPUMINFO;
/** Pointer to a CPU info structure. */
typedef CPUMINFO *PCPUMINFO;
/** Pointer to a const CPU info structure. */
typedef CPUMINFO const *CPCPUMINFO;
/**
* The saved host CPU state.
*/
typedef struct CPUMHOSTCTX
{
/** General purpose register, selectors, flags and more
* @{ */
/** General purpose register ++
* { */
/*uint64_t rax; - scratch*/
uint64_t rbx;
/*uint64_t rcx; - scratch*/
/*uint64_t rdx; - scratch*/
uint64_t rdi;
uint64_t rsi;
uint64_t rbp;
uint64_t rsp;
/*uint64_t r8; - scratch*/
/*uint64_t r9; - scratch*/
uint64_t r10;
uint64_t r11;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
/*uint64_t rip; - scratch*/
uint64_t rflags;
/** @} */
/** Selector registers
* @{ */
RTSEL ss;
RTSEL ssPadding;
RTSEL gs;
RTSEL gsPadding;
RTSEL fs;
RTSEL fsPadding;
RTSEL es;
RTSEL esPadding;
RTSEL ds;
RTSEL dsPadding;
RTSEL cs;
RTSEL csPadding;
/** @} */
/** Control registers.
* @{ */
/** The CR0 FPU state in HM mode. */
uint64_t cr0;
/*uint64_t cr2; - scratch*/
uint64_t cr3;
uint64_t cr4;
uint64_t cr8;
/** @} */
/** Debug registers.
* @{ */
uint64_t dr0;
uint64_t dr1;
uint64_t dr2;
uint64_t dr3;
uint64_t dr6;
uint64_t dr7;
/** @} */
/** Global Descriptor Table register. */
X86XDTR64 gdtr;
uint16_t gdtrPadding;
/** Interrupt Descriptor Table register. */
X86XDTR64 idtr;
uint16_t idtrPadding;
/** The task register. */
RTSEL ldtr;
RTSEL ldtrPadding;
/** The task register. */
RTSEL tr;
RTSEL trPadding;
/** MSRs
* @{ */
CPUMSYSENTER SysEnter;
uint64_t FSbase;
uint64_t GSbase;
uint64_t efer;
/** @} */
/* padding to get 64byte aligned size */
uint8_t auPadding[8];
#if HC_ARCH_BITS != 64
# error HC_ARCH_BITS not defined or unsupported
#endif
/** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
R0PTRTYPE(PX86XSAVEAREA) pXStateR0;
/** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
R3PTRTYPE(PX86XSAVEAREA) pXStateR3;
/** The XCR0 register. */
uint64_t xcr0;
/** The mask to pass to XSAVE/XRSTOR in EDX:EAX. If zero we use
* FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
uint64_t fXStateMask;
} CPUMHOSTCTX;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
#endif
/** Pointer to the saved host CPU state. */
typedef CPUMHOSTCTX *PCPUMHOSTCTX;
/**
* The hypervisor context CPU state (just DRx left now).
*/
typedef struct CPUMHYPERCTX
{
/** Debug registers.
* @remarks DR4 and DR5 should not be used since they are aliases for
* DR6 and DR7 respectively on both AMD and Intel CPUs.
* @remarks DR8-15 are currently not supported by AMD or Intel, so
* neither do we.
*/
uint64_t dr[8];
/** @todo eliminiate the rest. */
uint64_t cr3;
uint64_t au64Padding[7];
} CPUMHYPERCTX;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSizeAlignment(CPUMHYPERCTX, 64);
#endif
/** Pointer to the hypervisor context CPU state. */
typedef CPUMHYPERCTX *PCPUMHYPERCTX;
/**
* CPUM Data (part of VM)
*/
typedef struct CPUM
{
/** Use flags.
* These flags indicates which CPU features the host uses.
*/
uint32_t fHostUseFlags;
/** CR4 mask */
struct
{
uint32_t AndMask; /**< @todo Move these to the per-CPU structure and fix the switchers. Saves a register! */
uint32_t OrMask;
} CR4;
/** The (more) portable CPUID level. */
uint8_t u8PortableCpuIdLevel;
/** Indicates that a state restore is pending.
* This is used to verify load order dependencies (PGM). */
bool fPendingRestore;
uint8_t abPadding0[2];
/** XSAVE/XRTOR components we can expose to the guest mask. */
uint64_t fXStateGuestMask;
/** XSAVE/XRSTOR host mask. Only state components in this mask can be exposed
* to the guest. This is 0 if no XSAVE/XRSTOR bits can be exposed. */
uint64_t fXStateHostMask;
/** The host MXCSR mask (determined at init). */
uint32_t fHostMxCsrMask;
/** Nested VMX: Whether to expose VMX-preemption timer to the guest. */
bool fNestedVmxPreemptTimer;
uint8_t abPadding1[3];
/** Align to 64-byte boundary. */
uint8_t abPadding2[20+4];
/** Host CPU feature information.
* Externaly visible via the VM structure, aligned on 64-byte boundrary. */
CPUMFEATURES HostFeatures;
/** Guest CPU feature information.
* Externaly visible via that VM structure, aligned with HostFeatures. */
CPUMFEATURES GuestFeatures;
/** Guest CPU info. */
CPUMINFO GuestInfo;
/** The standard set of CpuId leaves. */
CPUMCPUID aGuestCpuIdPatmStd[6];
/** The extended set of CpuId leaves. */
CPUMCPUID aGuestCpuIdPatmExt[10];
/** The centaur set of CpuId leaves. */
CPUMCPUID aGuestCpuIdPatmCentaur[4];
/** @name MSR statistics.
* @{ */
STAMCOUNTER cMsrWrites;
STAMCOUNTER cMsrWritesToIgnoredBits;
STAMCOUNTER cMsrWritesRaiseGp;
STAMCOUNTER cMsrWritesUnknown;
STAMCOUNTER cMsrReads;
STAMCOUNTER cMsrReadsRaiseGp;
STAMCOUNTER cMsrReadsUnknown;
/** @} */
} CPUM;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileMemberOffset(CPUM, HostFeatures, 64);
AssertCompileMemberOffset(CPUM, GuestFeatures, 112);
#endif
/** Pointer to the CPUM instance data residing in the shared VM structure. */
typedef CPUM *PCPUM;
/**
* CPUM Data (part of VMCPU)
*/
typedef struct CPUMCPU
{
/**
* Guest context.
* Aligned on a 64-byte boundary.
*/
CPUMCTX Guest;
/**
* Guest context - misc MSRs
* Aligned on a 64-byte boundary.
*/
CPUMCTXMSRS GuestMsrs;
/** Nested VMX: VMX-preemption timer - R0 ptr. */
PTMTIMERR0 pNestedVmxPreemptTimerR0;
/** Nested VMX: VMX-preemption timer - R3 ptr. */
PTMTIMERR3 pNestedVmxPreemptTimerR3;
/** Use flags.
* These flags indicates both what is to be used and what has been used.
*/
uint32_t fUseFlags;
/** Changed flags.
* These flags indicates to REM (and others) which important guest
* registers which has been changed since last time the flags were cleared.
* See the CPUM_CHANGED_* defines for what we keep track of.
*/
uint32_t fChanged;
/** Temporary storage for the return code of the function called in the
* 32-64 switcher. */
uint32_t u32RetCode;
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/** Used by the world switcher code to store which vectors needs restoring on
* the way back. */
uint32_t fApicDisVectors;
/** The address of the APIC mapping, NULL if no APIC.
* Call CPUMR0SetLApic to update this before doing a world switch. */
RTHCPTR pvApicBase;
/** Set if the CPU has the X2APIC mode enabled.
* Call CPUMR0SetLApic to update this before doing a world switch. */
bool fX2Apic;
#else
uint8_t abPadding3[4 + sizeof(RTHCPTR) + 1];
#endif
/** Have we entered the recompiler? */
bool fRemEntered;
/** Whether the X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC
* (?) bits are visible or not. (The APIC is responsible for setting this
* when loading state, so we won't save it.) */
bool fCpuIdApicFeatureVisible;
/** Align the next member on a 64-byte boundary. */
uint8_t abPadding2[64 - (16 + 12 + 4 + 8 + 1 + 2)];
/** Saved host context. Only valid while inside RC or HM contexts.
* Must be aligned on a 64-byte boundary. */
CPUMHOSTCTX Host;
/** Old hypervisor context, only used for combined DRx values now.
* Must be aligned on a 64-byte boundary. */
CPUMHYPERCTX Hyper;
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
uint8_t aMagic[56];
uint64_t uMagic;
#endif
} CPUMCPU;
/** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
typedef CPUMCPU *PCPUMCPU;
#ifndef VBOX_FOR_DTRACE_LIB
RT_C_DECLS_BEGIN
PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf);
PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit);
# ifdef IN_RING3
int cpumR3DbgInit(PVM pVM);
int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures);
int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs);
void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PVMXMSRS pGuestVmxMsrs);
void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
int cpumR3MsrReconcileWithCpuId(PVM pVM);
int cpumR3MsrApplyFudge(PVM pVM);
int cpumR3MsrRegStats(PVM pVM);
int cpumR3MsrStrictInitChecks(void);
PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
# endif
# ifdef IN_RC
DECLASM(int) cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
# endif
# ifdef IN_RING0
DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
DECLASM(void) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
# if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
DECLASM(void) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
# endif
# endif
# if defined(IN_RC) || defined(IN_RING0)
DECLASM(int) cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
DECLASM(void) cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible);
DECLASM(void) cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
DECLASM(void) cpumRZSaveGuestAvxRegisters(PCPUMCPU pCPUM);
# endif
RT_C_DECLS_END
#endif /* !VBOX_FOR_DTRACE_LIB */
/** @} */
#endif /* !VMM_INCLUDED_SRC_include_CPUMInternal_h */
|