/* $Id: SELMAll.cpp $ */
/** @file
* SELM All contexts.
*/
/*
* Copyright (C) 2006-2019 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include <VBox/vmm/selm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "SELMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <VBox/vmm/vmm.h>
#include <iprt/x86.h>
#include <iprt/string.h>
#include "SELMInline.h"
/*********************************************************************************************************************************
* Global Variables *
*********************************************************************************************************************************/
#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
/** Segment register names. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif
#ifndef IN_RING0
# ifdef SELM_TRACK_GUEST_GDT_CHANGES
/**
* @callback_method_impl{FNPGMVIRTHANDLER}
*/
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
# ifdef IN_RING3
RT_NOREF_PV(pVM);
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
return VINF_PGM_HANDLER_DO_DEFAULT;
# else /* IN_RC: */
/*
* Execute the write, doing necessary pre and post shadow GDT checks.
*/
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    uint32_t offGuestGdt = GCPtr - pCtx->gdtr.pGdt;
selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    memcpy(pvPtr, pvBuf, cbBuf);
VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
else
STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
return rcStrict;
# endif
}
# endif
# ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
* @callback_method_impl{FNPGMVIRTHANDLER}
*/
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
Log(("selmGuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser); RT_NOREF_PV(pVM);
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
# ifdef IN_RING3
return VINF_PGM_HANDLER_DO_DEFAULT;
# else
STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
# endif
}
# endif
# ifdef SELM_TRACK_GUEST_TSS_CHANGES
/**
* @callback_method_impl{FNPGMVIRTHANDLER}
*/
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestTSSWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
Log(("selmGuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
NOREF(pvBuf); NOREF(GCPtr); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser); NOREF(pvPtr);
# ifdef IN_RING3
RT_NOREF_PV(pVM);
/** @todo This can be optimized by checking for the ESP0 offset and tracking TR
* reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
* should probably also deregister the virtual handler if TR.base/size
* changes while we're in REM. May also share
* selmRCGuestTssPostWriteCheck code. */
VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
return VINF_PGM_HANDLER_DO_DEFAULT;
# else /* IN_RC */
/*
* Do the write and check if anything relevant changed.
*/
Assert(pVM->selm.s.GCPtrGuestTss != (uintptr_t)RTRCPTR_MAX);
memcpy(pvPtr, pvBuf, cbBuf);
return selmRCGuestTssPostWriteCheck(pVM, pVCpu, GCPtr - pVM->selm.s.GCPtrGuestTss, cbBuf);
# endif
}
# endif
#endif /* IN_RING0 */
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
* Converts a GC selector based address to a flat address.
*
* No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
* for that.
*
* @returns Flat address.
* @param pVM The cross context VM structure.
* @param Sel Selector part.
* @param Addr Address part.
* @remarks Don't use when in long mode.
*/
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */
Assert(VM_IS_RAW_MODE_ENABLED(pVM));
/** @todo check the limit. */
X86DESC Desc;
if (!(Sel & X86_SEL_LDT))
Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
else
{
/** @todo handle LDT pages not present! */
PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
Desc = paLDT[Sel >> X86_SEL_SHIFT];
}
return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
/**
* Converts a GC selector based address to a flat address.
*
* No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
* for that.
*
* @returns Flat address.
* @param pVM The cross context VM structure.
* @param SelReg Selector register
* @param pCtxCore CPU context
* @param Addr Address part.
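 *
 * @remarks Hypothetical call site (illustration only, not from this file) resolving a
 *          DS-relative guest address:
 *          RTGCPTR GCPtrSrc = SELMToFlat(pVM, DISSELREG_DS, pCtxCore, (RTGCPTR)pCtxCore->esi);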
*/
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
PCPUMSELREG pSReg;
PVMCPU pVCpu = VMMGetCpu(pVM);
int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);
/*
* Deal with real & v86 mode first.
*/
if ( pCtxCore->eflags.Bits.u1VM
|| CPUMIsGuestInRealMode(pVCpu))
{
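        /* Real/V86 mode: flat = 16-bit offset + segment base; the base is selector << 4
           unless valid hidden state (e.g. unreal-mode style setups) provides the base. */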
uint32_t uFlat = (uint32_t)Addr & 0xffff;
if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
uFlat += (uint32_t)pSReg->u64Base;
else
uFlat += (uint32_t)pSReg->Sel << 4;
return (RTGCPTR)uFlat;
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/** @todo when we're in 16 bits mode, we should cut off the address as well?? */
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif
    /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
if ( pCtxCore->cs.Attr.n.u1Long
&& CPUMIsGuestInLongMode(pVCpu))
{
switch (SelReg)
{
case DISSELREG_FS:
case DISSELREG_GS:
return (RTGCPTR)(pSReg->u64Base + Addr);
default:
return Addr; /* base 0 */
}
}
/* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
Assert(pSReg->u64Base <= 0xffffffff);
return (uint32_t)pSReg->u64Base + (uint32_t)Addr;
}
/**
* Converts a GC selector based address to a flat address.
*
* Some basic checking is done, but not all kinds yet.
*
* @returns VBox status
* @param pVCpu The cross context virtual CPU structure.
* @param SelReg Selector register.
* @param pCtxCore CPU context.
* @param Addr Address part.
* @param fFlags SELMTOFLAT_FLAGS_*
 *                      GDT entries are valid.
* @param ppvGC Where to store the GC flat address.
*/
VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
{
/*
* Fetch the selector first.
*/
PCPUMSELREG pSReg;
int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
AssertRCReturn(rc, rc); AssertPtr(pSReg);
/*
* Deal with real & v86 mode first.
*/
if ( pCtxCore->eflags.Bits.u1VM
|| CPUMIsGuestInRealMode(pVCpu))
{
if (ppvGC)
{
uint32_t uFlat = (uint32_t)Addr & 0xffff;
if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
*ppvGC = (uint32_t)pSReg->u64Base + uFlat;
else
*ppvGC = ((uint32_t)pSReg->Sel << 4) + uFlat;
}
return VINF_SUCCESS;
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif
    /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
RTGCPTR pvFlat;
bool fCheckLimit = true;
if ( pCtxCore->cs.Attr.n.u1Long
&& CPUMIsGuestInLongMode(pVCpu))
{
fCheckLimit = false;
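        /* 64-bit mode performs no segment limit checks; only FS and GS contribute a non-zero base. */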
switch (SelReg)
{
case DISSELREG_FS:
case DISSELREG_GS:
pvFlat = pSReg->u64Base + Addr;
break;
default:
pvFlat = Addr;
break;
}
}
else
{
/* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
pvFlat = (uint32_t)pSReg->u64Base + (uint32_t)Addr;
Assert(pvFlat <= UINT32_MAX);
}
/*
* Check type if present.
*/
if (pSReg->Attr.n.u1Present)
{
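        /* Only code/data descriptor types are expected in a segment register here;
           anything else falls through to VERR_INVALID_SELECTOR below. */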
switch (pSReg->Attr.n.u4Type)
{
/* Read only selector type. */
case X86_SEL_TYPE_RO:
case X86_SEL_TYPE_RO_ACC:
case X86_SEL_TYPE_RW:
case X86_SEL_TYPE_RW_ACC:
case X86_SEL_TYPE_EO:
case X86_SEL_TYPE_EO_ACC:
case X86_SEL_TYPE_ER:
case X86_SEL_TYPE_ER_ACC:
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if (fCheckLimit && Addr > pSReg->u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
return VINF_SUCCESS;
case X86_SEL_TYPE_EO_CONF:
case X86_SEL_TYPE_EO_CONF_ACC:
case X86_SEL_TYPE_ER_CONF:
case X86_SEL_TYPE_ER_CONF_ACC:
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if (fCheckLimit && Addr > pSReg->u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
return VINF_SUCCESS;
case X86_SEL_TYPE_RO_DOWN:
case X86_SEL_TYPE_RO_DOWN_ACC:
case X86_SEL_TYPE_RW_DOWN:
case X86_SEL_TYPE_RW_DOWN_ACC:
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if (fCheckLimit)
{
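                    /* Expand-down segment: valid offsets lie strictly above the limit, bounded
                       by 0xffff, or 0xffffffff when the granularity bit is set. */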
if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
return VERR_OUT_OF_SELECTOR_BOUNDS;
if (Addr <= pSReg->u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
}
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
return VINF_SUCCESS;
default:
return VERR_INVALID_SELECTOR;
}
}
return VERR_SELECTOR_NOT_PRESENT;
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
* Converts a GC selector based address to a flat address.
*
* Some basic checking is done, but not all kinds yet.
*
* @returns VBox status
* @param pVCpu The cross context virtual CPU structure.
* @param eflags Current eflags
* @param Sel Selector part.
* @param Addr Address part.
* @param fFlags SELMTOFLAT_FLAGS_*
 *                      GDT entries are valid.
* @param ppvGC Where to store the GC flat address.
* @param pcb Where to store the bytes from *ppvGC which can be accessed according to
* the selector. NULL is allowed.
* @remarks Don't use when in long mode.
*/
VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
{
Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! (Accessing shadow GDT/LDT.) */
Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
/*
* Deal with real & v86 mode first.
*/
if ( eflags.Bits.u1VM
|| CPUMIsGuestInRealMode(pVCpu))
{
RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
if (ppvGC)
*ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
if (pcb)
*pcb = 0x10000 - uFlat;
return VINF_SUCCESS;
}
/** @todo when we're in 16 bits mode, we should cut off the address as well?? */
X86DESC Desc;
PVM pVM = pVCpu->CTX_SUFF(pVM);
if (!(Sel & X86_SEL_LDT))
{
if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
&& (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
return VERR_INVALID_SELECTOR;
Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
}
else
{
if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
return VERR_INVALID_SELECTOR;
/** @todo handle LDT page(s) not present! */
PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
Desc = paLDT[Sel >> X86_SEL_SHIFT];
}
    /* Calc limit; X86DESC_LIMIT_G applies the granularity bit, so u32Limit is in bytes. */
uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
/* calc address assuming straight stuff. */
RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);
/* Cut the address to 32 bits. */
Assert(!CPUMIsGuestInLongMode(pVCpu));
pvFlat &= 0xffffffff;
uint8_t u1Present = Desc.Gen.u1Present;
uint8_t u1Granularity = Desc.Gen.u1Granularity;
uint8_t u1DescType = Desc.Gen.u1DescType;
uint8_t u4Type = Desc.Gen.u4Type;
/*
* Check if present.
*/
if (u1Present)
{
/*
* Type check.
*/
#define BOTH(a, b) ((a << 16) | b)
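        /* BOTH() pairs the descriptor-type bit (1 = code/data, 0 = system) with the 4-bit
           type so a single switch can dispatch on both descriptor classes. */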
switch (BOTH(u1DescType, u4Type))
{
/** Read only selector type. */
case BOTH(1,X86_SEL_TYPE_RO):
case BOTH(1,X86_SEL_TYPE_RO_ACC):
case BOTH(1,X86_SEL_TYPE_RW):
case BOTH(1,X86_SEL_TYPE_RW_ACC):
case BOTH(1,X86_SEL_TYPE_EO):
case BOTH(1,X86_SEL_TYPE_EO_ACC):
case BOTH(1,X86_SEL_TYPE_ER):
case BOTH(1,X86_SEL_TYPE_ER_ACC):
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if ((RTGCUINTPTR)Addr > u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
if (pcb)
*pcb = u32Limit - (uint32_t)Addr + 1;
return VINF_SUCCESS;
case BOTH(1,X86_SEL_TYPE_EO_CONF):
case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
case BOTH(1,X86_SEL_TYPE_ER_CONF):
case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if ((RTGCUINTPTR)Addr > u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
if (pcb)
*pcb = u32Limit - (uint32_t)Addr + 1;
return VINF_SUCCESS;
case BOTH(1,X86_SEL_TYPE_RO_DOWN):
case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
case BOTH(1,X86_SEL_TYPE_RW_DOWN):
case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
return VERR_OUT_OF_SELECTOR_BOUNDS;
if ((RTGCUINTPTR)Addr <= u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
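            /* For expand-down segments the accessible bytes run from Addr up to the segment top
               (0xffff, or 0xffffffff with granularity), not up to the limit. */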
if (pcb)
*pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
return VINF_SUCCESS;
case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
case BOTH(0,X86_SEL_TYPE_SYS_LDT):
case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
{
/** @todo fix this mess */
}
/* check limit. */
if ((RTGCUINTPTR)Addr > u32Limit)
return VERR_OUT_OF_SELECTOR_BOUNDS;
/* ok */
if (ppvGC)
*ppvGC = pvFlat;
if (pcb)
*pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
return VINF_SUCCESS;
default:
return VERR_INVALID_SELECTOR;
}
#undef BOTH
}
return VERR_SELECTOR_NOT_PRESENT;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
{
Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
RT_NOREF_PV(pCtx); RT_NOREF_PV(Sel);
/*
* Try read the entry.
*/
X86DESC GstDesc;
VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc), PGMACCESSORIGIN_SELM);
if (rcStrict == VINF_SUCCESS)
{
/*
* Validate it and load it.
*/
if (selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
{
selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
}
else
{
Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
}
}
else
{
AssertMsg(RT_FAILURE_NP(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n",
g_aszSRegNms[iSReg], Sel, VBOXSTRICTRC_VAL(rcStrict) ));
STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
}
}
/**
* CPUM helper that loads the hidden selector register from the descriptor table
* when executing with raw-mode.
*
* @remarks This is only used when in legacy protected mode!
*
* @param pVCpu The cross context virtual CPU structure of the calling EMT.
* @param pCtx The guest CPU context.
* @param pSReg The selector register.
*
* @todo Deal 100% correctly with stale selectors. What's more evil is
* invalid page table entries, which isn't impossible to imagine for
* LDT entries for instance, though unlikely. Currently, we turn a
* blind eye to these issues and return the old hidden registers,
* though we don't set the valid flag, so that we'll try loading them
* over and over again till we succeed loading something.
*/
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
Assert(pCtx->cr0 & X86_CR0_PE);
Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));
PVM pVM = pVCpu->CTX_SUFF(pVM);
Assert(pVM->cCpus == 1);
Assert(VM_IS_RAW_MODE_ENABLED(pVM));
/*
* Get the shadow descriptor table entry and validate it.
* Should something go amiss, try the guest table.
*/
RTSEL const Sel = pSReg->Sel;
uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
PCX86DESC pShwDesc;
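    /* The selector's TI bit (X86_SEL_LDT) picks GDT vs. LDT; Sel >> X86_SEL_SHIFT indexes the
       descriptor array, while Sel & X86_SEL_MASK is the same descriptor's byte offset, used when
       reading the guest table below. */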
if (!(Sel & X86_SEL_LDT))
{
/** @todo this shall not happen, we shall check for these things when executing
* LGDT */
AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);
pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
|| !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
{
selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
return;
}
}
else
{
/** @todo this shall not happen, we shall check for these things when executing
* LLDT */
AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);
pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
|| !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
{
selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
return;
}
}
/*
* All fine, load it.
*/
selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
/**
* Validates and converts a GC selector based code address to a flat
* address when in real or v8086 mode.
*
* @returns VINF_SUCCESS.
* @param pVCpu The cross context virtual CPU structure.
* @param SelCS Selector part.
* @param pSReg The hidden CS register part. Optional.
* @param Addr Address part.
* @param ppvFlat Where to store the flat address.
*/
DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
PRTGCPTR ppvFlat)
{
NOREF(pVCpu);
uint32_t uFlat = Addr & 0xffff;
if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
uFlat += (uint32_t)SelCS << 4;
else
uFlat += (uint32_t)pSReg->u64Base;
*ppvFlat = uFlat;
return VINF_SUCCESS;
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
* Validates and converts a GC selector based code address to a flat address
* when in protected/long mode using the raw-mode algorithm.
*
* @returns VBox status code.
* @param pVM The cross context VM structure.
* @param pVCpu The cross context virtual CPU structure.
* @param SelCPL Current privilege level. Get this from SS - CS might be
* conforming! A full selector can be passed, we'll only
* use the RPL part.
* @param SelCS Selector part.
* @param Addr Address part.
* @param ppvFlat Where to store the flat address.
* @param pcBits Where to store the segment bitness (16/32/64). Optional.
*/
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
PRTGCPTR ppvFlat, uint32_t *pcBits)
{
NOREF(pVCpu);
Assert(VM_IS_RAW_MODE_ENABLED(pVM));
/** @todo validate limit! */
X86DESC Desc;
if (!(SelCS & X86_SEL_LDT))
Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
else
{
/** @todo handle LDT page(s) not present! */
PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
Desc = paLDT[SelCS >> X86_SEL_SHIFT];
}
/*
* Check if present.
*/
if (Desc.Gen.u1Present)
{
/*
* Type check.
*/
if ( Desc.Gen.u1DescType == 1
&& (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
{
/*
* Check level.
*/
unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
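            /* Non-conforming code: the effective privilege (max of CPL and RPL) must not exceed
               the DPL; conforming code: it must be at least the DPL. */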
if ( !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
? uLevel <= Desc.Gen.u2Dpl
: uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
)
{
/*
* Limit check.
*/
uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
if ((RTGCUINTPTR)Addr <= u32Limit)
{
*ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
/* Cut the address to 32 bits. */
*ppvFlat &= 0xffffffff;
if (pcBits)
*pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
return VINF_SUCCESS;
}
return VERR_OUT_OF_SELECTOR_BOUNDS;
}
return VERR_INVALID_RPL;
}
return VERR_NOT_CODE_SELECTOR;
}
return VERR_SELECTOR_NOT_PRESENT;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
/**
* Validates and converts a GC selector based code address to a flat address
* when in protected/long mode using the standard hidden selector registers
*
* @returns VBox status code.
* @param pVCpu The cross context virtual CPU structure.
* @param SelCPL Current privilege level. Get this from SS - CS might be
* conforming! A full selector can be passed, we'll only
* use the RPL part.
* @param SelCS Selector part.
* @param pSRegCS The full CS selector register.
* @param Addr The address (think IP/EIP/RIP).
* @param ppvFlat Where to store the flat address upon successful return.
*/
DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
RTGCPTR Addr, PRTGCPTR ppvFlat)
{
NOREF(SelCPL); NOREF(SelCS);
/*
* Check if present.
*/
if (pSRegCS->Attr.n.u1Present)
{
/*
* Type check.
*/
if ( pSRegCS->Attr.n.u1DescType == 1
&& (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
{
            /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0
               (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
if ( pSRegCS->Attr.n.u1Long
&& CPUMIsGuestInLongMode(pVCpu))
{
*ppvFlat = Addr;
return VINF_SUCCESS;
}
/*
* Limit check. Note that the limit in the hidden register is the
* final value. The granularity bit was included in its calculation.
*/
uint32_t u32Limit = pSRegCS->u32Limit;
if ((uint32_t)Addr <= u32Limit)
{
*ppvFlat = (uint32_t)Addr + (uint32_t)pSRegCS->u64Base;
return VINF_SUCCESS;
}
return VERR_OUT_OF_SELECTOR_BOUNDS;
}
return VERR_NOT_CODE_SELECTOR;
}
return VERR_SELECTOR_NOT_PRESENT;
}
/**
* Validates and converts a GC selector based code address to a flat address.
*
* @returns VBox status code.
* @param pVCpu The cross context virtual CPU structure.
* @param Efl Current EFLAGS.
* @param SelCPL Current privilege level. Get this from SS - CS might be
* conforming! A full selector can be passed, we'll only
* use the RPL part.
* @param SelCS Selector part.
* @param pSRegCS The full CS selector register.
* @param Addr The address (think IP/EIP/RIP).
* @param ppvFlat Where to store the flat address upon successful return.
*/
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
RTGCPTR Addr, PRTGCPTR ppvFlat)
{
if ( Efl.Bits.u1VM
|| CPUMIsGuestInRealMode(pVCpu))
return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdated. */
if (!pSRegCS)
return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);
if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);
/* Undo ring compression. */
if ((SelCPL & X86_SEL_RPL) == 1 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
SelCPL &= ~X86_SEL_RPL;
Assert(pSRegCS->Sel == SelCS);
if ((SelCS & X86_SEL_RPL) == 1 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
SelCS &= ~X86_SEL_RPL;
#else
Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
Assert(pSRegCS->Sel == SelCS);
#endif
return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
/**
* Returns Hypervisor's Trap 08 (\#DF) selector.
*
* @returns Hypervisor's Trap 08 (\#DF) selector.
* @param pVM The cross context VM structure.
*/
VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
{
return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
/**
* Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
*
* @param pVM The cross context VM structure.
* @param u32EIP EIP of Trap 08 handler.
*/
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
pVM->selm.s.TssTrap08.eip = u32EIP;
}
/**
* Sets ss:esp for ring1 in main Hypervisor's TSS.
*
* @param pVM The cross context VM structure.
* @param ss Ring1 SS register value. Pass 0 if invalid.
* @param esp Ring1 ESP register value.
*/
void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
Assert(VM_IS_RAW_MODE_ENABLED(pVM));
Assert((ss & 1) || esp == 0);
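    /* The stack selector is expected to carry RPL 1 (raw-mode ring compression); a non-ring-1
       value is only acceptable when the stack is being invalidated (esp == 0). */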
pVM->selm.s.Tss.ss1 = ss;
pVM->selm.s.Tss.esp1 = (uint32_t)esp;
}
#ifdef VBOX_WITH_RAW_RING1
/**
 * Sets ss:esp for ring2 in main Hypervisor's TSS.
*
* @param pVM The cross context VM structure.
* @param ss Ring2 SS register value. Pass 0 if invalid.
* @param esp Ring2 ESP register value.
*/
void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
Assert(VM_IS_RAW_MODE_ENABLED(pVM));
Assert((ss & 3) == 2 || esp == 0);
pVM->selm.s.Tss.ss2 = ss;
pVM->selm.s.Tss.esp2 = (uint32_t)esp;
}
#endif
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
* Gets ss:esp for ring1 in main Hypervisor's TSS.
*
* Returns SS=0 if the ring-1 stack isn't valid.
*
* @returns VBox status code.
* @param pVM The cross context VM structure.
* @param pSS Ring1 SS register value.
* @param pEsp Ring1 ESP register value.
*/
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
Assert(VM_IS_RAW_MODE_ENABLED(pVM));
Assert(pVM->cCpus == 1);
PVMCPU pVCpu = &pVM->aCpus[0];
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
if (pVM->selm.s.fSyncTSSRing0Stack)
{
#endif
RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
int rc;
VBOXTSS tss;
Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);
# ifdef IN_RC
bool fTriedAlready = false;
l_tryagain:
PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
rc = MMGCRamRead(pVM, &tss.ss0, &pTss->ss0, sizeof(tss.ss0));
rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
# ifdef DEBUG
rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
# endif
if (RT_FAILURE(rc))
{
if (!fTriedAlready)
{
/* Shadow page might be out of sync. Sync and try again */
/** @todo might cross page boundary */
fTriedAlready = true;
rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
if (rc != VINF_SUCCESS)
return rc;
goto l_tryagain;
}
AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
return rc;
}
# else /* !IN_RC */
/* Reading too much. Could be cheaper than two separate calls though. */
rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
if (RT_FAILURE(rc))
{
AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
return rc;
}
# endif /* !IN_RC */
# ifdef LOG_ENABLED
uint32_t ssr0 = pVM->selm.s.Tss.ss1;
uint32_t espr0 = pVM->selm.s.Tss.esp1;
ssr0 &= ~1;
if (ssr0 != tss.ss0 || espr0 != tss.esp0)
Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
    /* Update our shadow TSS with the guest's ring-0 stack, demoted to ring 1 (raw-mode ring compression). */
selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
pVM->selm.s.fSyncTSSRing0Stack = false;
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
}
#endif
*pSS = pVM->selm.s.Tss.ss1;
*pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;
return VINF_SUCCESS;
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && defined(VBOX_WITH_64_BITS_GUESTS))
/**
* Gets the hypervisor code selector (CS).
* @returns CS selector.
* @param pVM The cross context VM structure.
*/
VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
{
return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
}
/**
 * Gets the 64-bit mode hypervisor code selector (CS64).
* @returns CS selector.
* @param pVM The cross context VM structure.
*/
VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
{
return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
}
/**
* Gets the hypervisor data selector (DS).
* @returns DS selector.
* @param pVM The cross context VM structure.
*/
VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
{
return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
}
/**
* Gets the hypervisor TSS selector.
* @returns TSS selector.
* @param pVM The cross context VM structure.
*/
VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
{
return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
}
/**
* Gets the hypervisor TSS Trap 8 selector.
* @returns TSS Trap 8 selector.
* @param pVM The cross context VM structure.
*/
VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
{
return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
/**
* Gets the address for the hypervisor GDT.
*
* @returns The GDT address.
* @param pVM The cross context VM structure.
* @remark This is intended only for very special use, like in the world
* switchers. Don't exploit this API!
*/
VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
{
/*
* Always convert this from the HC pointer since we can be
* called before the first relocation and have to work correctly
* without having dependencies on the relocation order.
*/
return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
}
#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && defined(VBOX_WITH_64_BITS_GUESTS)) */
/**
* Gets info about the current TSS.
*
* @returns VBox status code.
* @retval VINF_SUCCESS if we've got a TSS loaded.
* @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
*
* @param pVM The cross context VM structure.
* @param pVCpu The cross context virtual CPU structure.
* @param pGCPtrTss Where to store the TSS address.
* @param pcbTss Where to store the TSS size limit.
* @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
*/
VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
{
NOREF(pVM);
/*
* The TR hidden register is always valid.
*/
CPUMSELREGHID trHid;
RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
if (!(tr & X86_SEL_MASK_OFF_RPL))
return VERR_SELM_NO_TSS;
*pGCPtrTss = trHid.u64Base;
    *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* the limit is inclusive, so add 1 for the size unless that would overflow 32 bits */
if (pfCanHaveIOBitmap)
*pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
|| trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
return VINF_SUCCESS;
}
/**
* Notification callback which is called whenever there is a chance that a CR3
* value might have changed.
* This is called by PGM.
*
* @param pVM The cross context VM structure.
* @param pVCpu The cross context virtual CPU structure.
*/
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
/** @todo SMP support!! (64-bit guest scenario, primarily) */
pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}