1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
|
/* $Id: VMInternal.h $ */
/** @file
* VM - Internal header file.
*/
/*
* Copyright (C) 2006-2020 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef VMM_INCLUDED_SRC_include_VMInternal_h
#define VMM_INCLUDED_SRC_include_VMInternal_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif
#include <VBox/cdefs.h>
#include <VBox/vmm/vmapi.h>
#include <iprt/assert.h>
#include <iprt/critsect.h>
#include <setjmp.h>
/** @defgroup grp_vm_int Internals
* @ingroup grp_vm
* @internal
* @{
*/
/**
 * VM state change callback.
 *
 * Node in a singly linked list of registered state change callbacks;
 * the list head and append position live in VMINTUSERPERVM
 * (pAtState / ppAtStateNext), guarded by AtStateCritSect.
 */
typedef struct VMATSTATE
{
/** Pointer to the next one. */
struct VMATSTATE *pNext;
/** Pointer to the callback. */
PFNVMATSTATE pfnAtState;
/** The user argument. */
void *pvUser;
} VMATSTATE;
/** Pointer to a VM state change callback. */
typedef VMATSTATE *PVMATSTATE;
/**
 * VM error callback.
 *
 * Node in a singly linked list of registered error callbacks;
 * the list head and append position live in VMINTUSERPERVM
 * (pAtError / ppAtErrorNext), guarded by AtErrorCritSect.
 */
typedef struct VMATERROR
{
/** Pointer to the next one. */
struct VMATERROR *pNext;
/** Pointer to the callback. */
PFNVMATERROR pfnAtError;
/** The user argument. */
void *pvUser;
} VMATERROR;
/** Pointer to a VM error callback. */
typedef VMATERROR *PVMATERROR;
/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the error details.
 *
 * All off* members are byte offsets from the start of this structure
 * into the variable-size area that follows it.
 */
typedef struct VMERROR
{
/** The size of the chunk. */
uint32_t cbAllocated;
/** The current offset into the chunk.
 * We start by putting the filename and function immediately
 * after the end of the buffer. */
uint32_t off;
/** Offset from the start of this structure to the file name. */
uint32_t offFile;
/** The line number. */
uint32_t iLine;
/** Offset from the start of this structure to the function name. */
uint32_t offFunction;
/** Offset from the start of this structure to the formatted message text. */
uint32_t offMessage;
/** The VBox status code. */
int32_t rc;
} VMERROR, *PVMERROR;
/**
 * VM runtime error callback.
 *
 * Node in a singly linked list of registered runtime error callbacks;
 * the list head and append position live in VMINTUSERPERVM
 * (pAtRuntimeError / ppAtRuntimeErrorNext), guarded by AtErrorCritSect.
 */
typedef struct VMATRUNTIMEERROR
{
/** Pointer to the next one. */
struct VMATRUNTIMEERROR *pNext;
/** Pointer to the callback. */
PFNVMATRUNTIMEERROR pfnAtRuntimeError;
/** The user argument. */
void *pvUser;
} VMATRUNTIMEERROR;
/** Pointer to a VM runtime error callback. */
typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;
/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the runtime error details.
 *
 * All off* members are byte offsets from the start of this structure
 * into the variable-size area that follows it.
 */
typedef struct VMRUNTIMEERROR
{
/** The size of the chunk. */
uint32_t cbAllocated;
/** The current offset into the chunk.
 * We start by putting the error ID immediately
 * after the end of the buffer. */
uint32_t off;
/** Offset from the start of this structure to the error ID. */
uint32_t offErrorId;
/** Offset from the start of this structure to the formatted message text. */
uint32_t offMessage;
/** Error flags (VMSETRTERR_FLAGS_*). NOTE(review): flag set presumed from the
 * VMSetRuntimeError API — confirm against VBox/vmm/vmapi.h. */
uint32_t fFlags;
} VMRUNTIMEERROR, *PVMRUNTIMEERROR;
/** The halt method.
 * Selects which of the halt/wait algorithms below an EMT uses; see the
 * Halt union members in VMINTUSERPERVM / VMINTUSERPERVMCPU for the
 * per-method configuration and state. */
typedef enum
{
/** The usual invalid value. */
VMHALTMETHOD_INVALID = 0,
/** Use the method used during bootstrapping. */
VMHALTMETHOD_BOOTSTRAP,
/** Use the default method. */
VMHALTMETHOD_DEFAULT,
/** The old spin/yield/block method. */
VMHALTMETHOD_OLD,
/** The first go at a block/spin method. */
VMHALTMETHOD_1,
/** The first go at a more global approach. */
VMHALTMETHOD_GLOBAL_1,
/** The end of valid methods. (not inclusive of course) */
VMHALTMETHOD_END,
/** The usual 32-bit max value. (Forces the compiler to use a 32-bit type.) */
VMHALTMETHOD_32BIT_HACK = 0x7fffffff
} VMHALTMETHOD;
/**
 * VM Internal Data (part of the VM structure).
 *
 * @todo Move this and all related things to VMM. The VM component was, to some
 * extent at least, a bad ad hoc design which should all have been put in
 * VMM. @see pg_vm.
 */
typedef struct VMINT
{
/** VM Error Message. (Ring-3 pointer; heap chunk described by VMERROR.) */
R3PTRTYPE(PVMERROR) pErrorR3;
/** VM Runtime Error Message. (Ring-3 pointer; heap chunk described by VMRUNTIMEERROR.) */
R3PTRTYPE(PVMRUNTIMEERROR) pRuntimeErrorR3;
/** The VM was/is-being teleported and has not yet been fully resumed. */
bool fTeleportedAndNotFullyResumedYet;
/** The VM should power off instead of reset. */
bool fPowerOffInsteadOfReset;
/** Reset counter (soft + hard). */
uint32_t cResets;
/** Hard reset counter. */
uint32_t cHardResets;
/** Soft reset counter. */
uint32_t cSoftResets;
} VMINT;
/** Pointer to the VM Internal Data (part of the VM structure). */
typedef VMINT *PVMINT;
#ifdef IN_RING3
/**
 * VM internal data kept in the UVM.
 */
typedef struct VMINTUSERPERVM
{
/** Head of the standard request queue. Atomic. */
volatile PVMREQ pNormalReqs;
/** Head of the priority request queue. Atomic. */
volatile PVMREQ pPriorityReqs;
/** The last index used during alloc/free. */
volatile uint32_t iReqFree;
/** Number of free request packets. */
volatile uint32_t cReqFree;
/** Array of pointers to lists of free request packets. Atomic. */
volatile PVMREQ apReqFree[16 - (HC_ARCH_BITS == 32 ? 5 : 4)];
/** The reference count of the UVM handle. */
volatile uint32_t cUvmRefs;
/** Number of active EMTs. */
volatile uint32_t cActiveEmts;
# ifdef VBOX_WITH_STATISTICS
# if HC_ARCH_BITS == 32
uint32_t uPadding;
# endif
/** Number of VMR3ReqAlloc returning a new packet. */
STAMCOUNTER StatReqAllocNew;
/** Number of VMR3ReqAlloc causing races. */
STAMCOUNTER StatReqAllocRaces;
/** Number of VMR3ReqAlloc returning a recycled packet. */
STAMCOUNTER StatReqAllocRecycled;
/** Number of VMR3ReqFree calls. */
STAMCOUNTER StatReqFree;
/** Number of times the request was actually freed. */
STAMCOUNTER StatReqFreeOverflow;
/** Number of requests served. */
STAMCOUNTER StatReqProcessed;
/** Number of times there are more than one request and the others needed to be
 * pushed back onto the list. */
STAMCOUNTER StatReqMoreThan1;
/** Number of times we've raced someone when pushing the other requests back
 * onto the list. */
STAMCOUNTER StatReqPushBackRaces;
# endif
/** Pointer to the support library session.
 * Mainly for creation and destruction. */
PSUPDRVSESSION pSession;
/** Force EMT to terminate. */
bool volatile fTerminateEMT;
/** Critical section for pAtState and enmPrevVMState. */
RTCRITSECT AtStateCritSect;
/** List of registered state change callbacks. */
PVMATSTATE pAtState;
/** Where to append the next state change callback (tail of the pAtState list). */
PVMATSTATE *ppAtStateNext;
/** The previous VM state.
 * This is mainly used for the 'Resetting' state, but may come in handy later
 * and when debugging. */
VMSTATE enmPrevVMState;
/** Reason for the most recent suspend operation. */
VMSUSPENDREASON enmSuspendReason;
/** Reason for the most recent resume operation. */
VMRESUMEREASON enmResumeReason;
/** Critical section for pAtError and pAtRuntimeError. */
RTCRITSECT AtErrorCritSect;
/** List of registered error callbacks. */
PVMATERROR pAtError;
/** Where to append the next error callback (tail of the pAtError list). */
PVMATERROR *ppAtErrorNext;
/** The error message count.
 * This is incremented every time an error is raised. */
uint32_t volatile cErrors;
/** The runtime error message count.
 * This is incremented every time a runtime error is raised. */
uint32_t volatile cRuntimeErrors;
/** List of registered runtime error callbacks. */
PVMATRUNTIMEERROR pAtRuntimeError;
/** Where to append the next runtime error callback (tail of the pAtRuntimeError list). */
PVMATRUNTIMEERROR *ppAtRuntimeErrorNext;
/** @name Generic Halt data
 * @{
 */
/** The current halt method.
 * Can be selected by CFGM option 'VM/HaltMethod'. */
VMHALTMETHOD enmHaltMethod;
/** The index into g_aHaltMethods of the current halt method. */
uint32_t volatile iHaltMethod;
/** @} */
/** @todo Do NOT add new members here or reuse the current, we need to store the config for
 * each halt method separately because we're racing on SMP guest rigs. */
union
{
/**
 * Method 1 & 2 - Block whenever possible, and when lagging behind
 * switch to spinning with regular blocking every 5-200ms (defaults)
 * depending on the accumulated lag. The blocking interval is adjusted
 * with the average oversleeping of the last 64 times.
 *
 * The difference between 1 and 2 is that we use native absolute
 * time APIs for the blocking instead of the millisecond based IPRT
 * interface.
 */
struct
{
/** The minimum blocking interval (when spinning).
 * NOTE(review): the original comments on these two members appeared to be
 * swapped relative to the member names; aligned with the names — confirm
 * against the halt-method implementation. */
uint32_t u32MinBlockIntervalCfg;
/** The maximum blocking interval (when spinning). */
uint32_t u32MaxBlockIntervalCfg;
/** The value to divide the current lag by to get the raw blocking interval (when spinning). */
uint32_t u32LagBlockIntervalDivisorCfg;
/** When to start spinning (lag / nano secs). */
uint32_t u32StartSpinningCfg;
/** When to stop spinning (lag / nano secs). */
uint32_t u32StopSpinningCfg;
} Method12;
/**
 * The GVMM manages halted and waiting EMTs.
 */
struct
{
/** The threshold between spinning and blocking. */
uint32_t cNsSpinBlockThresholdCfg;
} Global1;
} Halt;
/** Pointer to the DBGC instance data. */
void *pvDBGC;
/** TLS index for the VMINTUSERPERVMCPU pointer. */
RTTLS idxTLS;
/** The VM name. (Set after the config constructor has been called.) */
char *pszName;
/** The VM UUID. (Set after the config constructor has been called.) */
RTUUID Uuid;
} VMINTUSERPERVM;
# ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(VMINTUSERPERVM, StatReqAllocNew, 8);
# endif
/** Pointer to the VM internal data kept in the UVM. */
typedef VMINTUSERPERVM *PVMINTUSERPERVM;
/**
 * VMCPU internal data kept in the UVM.
 *
 * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
 */
typedef struct VMINTUSERPERVMCPU
{
/** Head of the normal request queue. Atomic. */
volatile PVMREQ pNormalReqs;
/** Head of the priority request queue. Atomic. */
volatile PVMREQ pPriorityReqs;
/** The handle to the EMT thread. */
RTTHREAD ThreadEMT;
/** The native handle of the EMT thread. */
RTNATIVETHREAD NativeThreadEMT;
/** Wait event semaphore. */
RTSEMEVENT EventSemWait;
/** Wait/Idle indicator. */
bool volatile fWait;
/** Set if we've been thru vmR3Destroy and decremented the active EMT count
 * already. */
bool volatile fBeenThruVmDestroy;
/** Align the next bit. */
bool afAlignment[HC_ARCH_BITS == 32 ? 2 : 6];
/** @name Generic Halt data
 * @{
 */
/** The average time (ns) between two halts in the last second. (updated once per second) */
uint32_t HaltInterval;
/** The average halt frequency for the last second. (updated once per second) */
uint32_t HaltFrequency;
/** The number of halts in the current period. */
uint32_t cHalts;
uint32_t padding; /**< alignment padding. */
/** When we started counting halts in cHalts (RTTimeNanoTS). */
uint64_t u64HaltsStartTS;
/** @} */
/** Union containing data and config for the different halt algorithms. */
union
{
/**
 * Method 1 & 2 - Block whenever possible, and when lagging behind
 * switch to spinning with regular blocking every 5-200ms (defaults)
 * depending on the accumulated lag. The blocking interval is adjusted
 * with the average oversleeping of the last 64 times.
 *
 * The difference between 1 and 2 is that we use native absolute
 * time APIs for the blocking instead of the millisecond based IPRT
 * interface.
 */
struct
{
/** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
uint32_t cBlocks;
/** Align the next member. */
uint32_t u32Alignment;
/** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
uint64_t cNSBlockedTooLongAvg;
/** Total time spend oversleeping when blocking. */
uint64_t cNSBlockedTooLong;
/** Total time spent blocking. */
uint64_t cNSBlocked;
/** The timestamp (RTTimeNanoTS) of the last block. */
uint64_t u64LastBlockTS;
/** When we started spinning relentlessly in order to catch up some of the oversleeping.
 * This is 0 when we're not spinning. */
uint64_t u64StartSpinTS;
} Method12;
# if 0
/**
 * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
 * sprinkle it with yields.
 */
struct
{
/** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
uint32_t cBlocks;
/** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
uint64_t cBlockedTooLongNSAvg;
/** Total time spend oversleeping when blocking. */
uint64_t cBlockedTooLongNS;
/** Total time spent blocking. */
uint64_t cBlockedNS;
/** The timestamp (RTTimeNanoTS) of the last block. */
uint64_t u64LastBlockTS;
/** How many times we've yielded while cBlockedNS and cBlockedTooLongNS has been accumulating. */
uint32_t cYields;
/** Avg. time spend oversleeping when yielding. */
uint32_t cYieldTooLongNSAvg;
/** Total time spend oversleeping when yielding. */
uint64_t cYieldTooLongNS;
/** Total time spent yielding. */
uint64_t cYieldedNS;
/** The timestamp (RTTimeNanoTS) of the last yield. */
uint64_t u64LastYieldTS;
/** When we started spinning relentlessly in order to catch up some of the oversleeping. */
uint64_t u64StartSpinTS;
} Method34;
# endif
} Halt;
/** Profiling the halted state; yielding vs blocking.
 * @{ */
STAMPROFILE StatHaltYield;
STAMPROFILE StatHaltBlock;
STAMPROFILE StatHaltBlockOverslept;
STAMPROFILE StatHaltBlockInsomnia;
STAMPROFILE StatHaltBlockOnTime;
STAMPROFILE StatHaltTimers;
STAMPROFILE StatHaltPoll;
/** @} */
} VMINTUSERPERVMCPU;
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, u64HaltsStartTS, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, Halt.Method12.cNSBlockedTooLongAvg, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, StatHaltYield, 8);
/** Pointer to the VMCPU internal data kept in the UVM. */
typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;
#endif /* IN_RING3 */
RT_C_DECLS_BEGIN
/** The emulation thread (EMT) function. */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
/** Worker for selecting the halt method of a VM (by UVM handle). */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
/** VM destruction worker (callback form). */
DECLCALLBACK(int) vmR3Destroy(PVM pVM);
/** Raises an error on a UVM handle (variadic-list form). */
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
/** Copies error details into the VMERROR heap chunk (VMINT::pErrorR3). */
void vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
/** Raises a runtime error with a pre-formatted message. */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage);
/** Raises a runtime error, formatting the message from a va_list. */
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa);
/** Copies runtime error details into the VMRUNTIMEERROR heap chunk (VMINT::pRuntimeErrorR3). */
void vmSetRuntimeErrorCopy(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va);
/** Marks the VM as terminated. */
void vmR3SetTerminated(PVM pVM);
RT_C_DECLS_END
/** @} */
#endif /* !VMM_INCLUDED_SRC_include_VMInternal_h */
|