author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 22:55:52 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 22:55:52 +0000
commit    cd47c2446f1a9dee96610f298989848f8986a8be
tree      02c30d62a9164987d0aaba2f72c58a50053205d6
parent    Releasing progress-linux version 7.0.14-dfsg-4~progress7.99u1.
Merging upstream version 7.0.16-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/VMM')
-rw-r--r--               src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp                 30
-rw-r--r--               src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp                2
-rw-r--r--               src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h          17
-rw-r--r--               src/VBox/VMM/VMMAll/PGMAllPhys.cpp                   5
-rw-r--r--               src/VBox/VMM/VMMAll/PGMAllPool.cpp                  29
-rw-r--r--               src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h             2
-rw-r--r--               src/VBox/VMM/VMMR0/HMVMXR0.cpp                      21
-rw-r--r--               src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp                  47
-rwxr-xr-x [-rw-r--r--]  src/VBox/VMM/testcase/Instructions/itgTableDaa.py   0
-rwxr-xr-x [-rw-r--r--]  src/VBox/VMM/testcase/Instructions/itgTableDas.py   0
-rw-r--r--               src/VBox/VMM/testcase/Makefile.kmk                   1
11 files changed, 135 insertions, 19 deletions
diff --git a/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp b/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
index 29a4e52d..ab8ba45c 100644
--- a/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
+++ b/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
@@ -1731,7 +1731,8 @@ static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Amd64SyscallFlagMask(PVMCPUCC pVCpu,
static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Amd64SyscallFlagMask(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
{
RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
- pVCpu->cpum.s.Guest.msrSFMASK = uValue;
+ /* The high bits are ignored and read as zero; writing them does not raise #GP. See @bugref{10610}. */
+ pVCpu->cpum.s.Guest.msrSFMASK = uValue & UINT32_MAX;
return VINF_SUCCESS;
}
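
Per the new comment, MSR_K8_SF_MASK behaves as a 32-bit register on this path: the high half is ignored on write and reads back as zero. A minimal model of those semantics (a sketch, not VirtualBox's code):

#include <stdint.h>

static uint64_t g_uSfMask;  /* backing store; only bits 31:0 are meaningful */

/* Writes silently drop bits 63:32 (no #GP); reads return them as zero. */
static void WriteSfMask(uint64_t uValue)
{
    g_uSfMask = uValue & UINT32_MAX;
}

static uint64_t ReadSfMask(void)
{
    return g_uSfMask;
}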
@@ -1749,8 +1750,13 @@ static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Amd64FsBase(PVMCPUCC pVCpu, uint32_t
static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Amd64FsBase(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
{
RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
- pVCpu->cpum.s.Guest.fs.u64Base = uValue;
- return VINF_SUCCESS;
+ if (X86_IS_CANONICAL(uValue))
+ {
+ pVCpu->cpum.s.Guest.fs.u64Base = uValue;
+ return VINF_SUCCESS;
+ }
+ Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+ return VERR_CPUM_RAISE_GP_0;
}
@@ -1766,8 +1772,13 @@ static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Amd64GsBase(PVMCPUCC pVCpu, uint32_t
static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Amd64GsBase(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
{
RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
- pVCpu->cpum.s.Guest.gs.u64Base = uValue;
- return VINF_SUCCESS;
+ if (X86_IS_CANONICAL(uValue))
+ {
+ pVCpu->cpum.s.Guest.gs.u64Base = uValue;
+ return VINF_SUCCESS;
+ }
+ Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+ return VERR_CPUM_RAISE_GP_0;
}
@@ -1784,8 +1795,13 @@ static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Amd64KernelGsBase(PVMCPUCC pVCpu, ui
static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Amd64KernelGsBase(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
{
RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
- pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
- return VINF_SUCCESS;
+ if (X86_IS_CANONICAL(uValue))
+ {
+ pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
+ return VINF_SUCCESS;
+ }
+ Log(("CPUM: wrmsr %s(%#x), %#llx -> #GP - not canonical\n", pRange->szName, idMsr, uValue));
+ return VERR_CPUM_RAISE_GP_0;
}
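
The three hunks above all apply the same fix: FS base, GS base and KernelGSBase must hold canonical addresses, so non-canonical writes now raise #GP instead of being stored and blowing up later. For 48-bit virtual addresses, canonical means bits 63:47 are a sign extension of bit 47. A minimal standalone sketch of that test (illustrative only; the real X86_IS_CANONICAL macro is defined in the IPRT headers):

#include <stdbool.h>
#include <stdint.h>

/* An address is canonical (48-bit) iff bits 63:47 replicate bit 47.
   Adding 2^47 folds both canonical halves into [0, 2^48). */
static bool IsCanonical48(uint64_t uAddr)
{
    return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
}

The add-and-compare form avoids branching on the sign bit: the low canonical half lands in [2^47, 2^48) and the high half wraps around into [0, 2^47), so everything non-canonical falls at or above 2^48.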
diff --git a/src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp b/src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp
index c737bf1f..4a924f78 100644
--- a/src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp
+++ b/src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp
@@ -448,7 +448,7 @@ static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, CTX_SUFF(PIOMMMIOENTRY) pR
/*
* Do DWORD read from the device.
*/
- uint32_t u32Value;
+ uint32_t u32Value = 0;
VBOXSTRICTRC rcStrict2 = pRegEntry->pfnReadCallback(pRegEntry->pDevIns, pRegEntry->pvUser,
!(pRegEntry->fFlags & IOMMMIO_FLAGS_ABS)
? offRegion & ~(RTGCPHYS)3 : GCPhys & ~(RTGCPHYS)3,
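
A one-line change, but a deliberate one: the DWORD read callback is not guaranteed to store a value on every path, and an uninitialized u32Value would let indeterminate stack bytes reach the caller. A generic sketch of the pattern, using a hypothetical callback type rather than the real IOM signature:

#include <stdint.h>

typedef int (*PFNREAD32)(void *pvUser, uint64_t off, uint32_t *pu32);

/* Zero the out-parameter first so a handler that fails, or writes only
   part of the value, cannot leak stale stack contents. */
static uint32_t SafeRead32(PFNREAD32 pfnRead, void *pvUser, uint64_t off)
{
    uint32_t u32Value = 0;          /* defined even if pfnRead never writes */
    (void)pfnRead(pvUser, off, &u32Value);
    return u32Value;
}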
diff --git a/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h b/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
index 410abae1..56895dfb 100644
--- a/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
+++ b/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
@@ -35,16 +35,25 @@
* @returns @c true if valid, @c false otherwise.
* @param pVCpu The cross context virtual CPU structure of the calling EMT.
* @param uEntry The EPT page table entry to check.
+ *
 * @remarks Currently this ASSUMES @c uEntry is present (debug asserted)!
*/
DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(PCVMCPUCC pVCpu, uint64_t uEntry)
{
if (!(uEntry & EPT_E_READ))
{
- Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
- Assert(!RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY));
- NOREF(pVCpu);
- if (uEntry & (EPT_E_WRITE | EPT_E_EXECUTE))
+ if (uEntry & EPT_E_WRITE)
return false;
+
+ /*
+ * All callers currently check the present mask before calling this
+ * function. Hence, the execute bit must be set by now.
+ */
+ Assert(uEntry & EPT_E_EXECUTE);
+ Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
+ if (pVCpu->pgm.s.uEptVpidCapMsr & VMX_BF_EPT_VPID_CAP_EXEC_ONLY_MASK)
+ return true;
+ return false;
}
return true;
}
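
The restructured check spells out the EPT permission rules: any readable entry is fine, write-without-read is always a misconfiguration, and execute-only is legal only when the EPT/VPID capability MSR advertises it. A compact restatement under illustrative constants (the real EPT_E_* and VMX_BF_EPT_VPID_CAP_* definitions live in the VMX headers):

#include <stdbool.h>
#include <stdint.h>

#define MY_EPT_READ     UINT64_C(0x1)
#define MY_EPT_WRITE    UINT64_C(0x2)
#define MY_EPT_EXECUTE  UINT64_C(0x4)

/* Assumes the caller has already verified the entry is present, i.e. at
   least one of R/W/X is set, matching the Assert in the hunk above. */
static bool IsEptPermValid(uint64_t uEntry, bool fExecOnlyCap)
{
    if (uEntry & MY_EPT_READ)
        return true;            /* R, RW, RX, RWX are all acceptable */
    if (uEntry & MY_EPT_WRITE)
        return false;           /* W or WX without R is never valid */
    return fExecOnlyCap;        /* X-only needs the capability bit */
}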
diff --git a/src/VBox/VMM/VMMAll/PGMAllPhys.cpp b/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
index 457f7de0..9aa351d9 100644
--- a/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
+++ b/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
@@ -2504,6 +2504,11 @@ static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhy
/* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
PGM_UNLOCK(pVM);
+ /* If the access originates from a device, make sure the buffer is initialized
+ as a guard against leaking heap, stack and other info via badly written
+ MMIO handling. @bugref{10651} */
+ if (enmOrigin == PGMACCESSORIGIN_DEVICE)
+ memset(pvBuf, 0xff, cb);
rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
PGM_LOCK_VOID(pVM);
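
Prefilling the buffer with 0xff means a handler that writes fewer bytes than requested hands back all-ones, the conventional open-bus value, instead of leftover heap or stack contents. A standalone sketch of the idea, with a hypothetical handler type:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef int (*PFNMMIOREAD)(void *pvUser, uint64_t off, void *pvBuf, size_t cb);

/* For device-originated reads, poison the buffer first so short or buggy
   handler writes surface as 0xff bytes instead of leaked memory. */
static int DeviceOriginRead(PFNMMIOREAD pfnRead, void *pvUser,
                            uint64_t off, void *pvBuf, size_t cb)
{
    memset(pvBuf, 0xff, cb);    /* 0xff mimics reads from an unbacked bus */
    return pfnRead(pvUser, off, pvBuf, cb);
}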
diff --git a/src/VBox/VMM/VMMAll/PGMAllPool.cpp b/src/VBox/VMM/VMMAll/PGMAllPool.cpp
index 59f946e5..aec01623 100644
--- a/src/VBox/VMM/VMMAll/PGMAllPool.cpp
+++ b/src/VBox/VMM/VMMAll/PGMAllPool.cpp
@@ -4774,6 +4774,31 @@ DECLINLINE(void) pgmPoolTrackDerefNestedPDEpt(PPGMPOOL pPool, PPGMPOOLPAGE pPage
}
}
+
+/**
+ * Clear references to shadowed pages in a SLAT EPT PML4 table.
+ *
+ * @param pPool The pool.
+ * @param pPage The page.
+ * @param pShwPml4 The shadow PML4 table.
+ */
+DECLINLINE(void) pgmPoolTrackDerefNestedPML4(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPML4 pShwPml4)
+{
+ Assert(PGMPOOL_PAGE_IS_NESTED(pPage));
+ for (unsigned i = 0; i < RT_ELEMENTS(pShwPml4->a); i++)
+ {
+ X86PGPAEUINT const uPml4e = pShwPml4->a[i].u;
+ AssertMsg((uPml4e & (EPT_PML4E_MBZ_MASK | 0xfff0000000000f00)) == 0, ("uPml4e=%RX64\n", uPml4e));
+ if (uPml4e & EPT_PRESENT_MASK)
+ {
+ PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPml4e & EPT_PML4E_PG_MASK);
+ if (pSubPage)
+ pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
+ else
+ AssertFatalMsgFailed(("%RX64\n", uPml4e & X86_PML4E_PG_MASK));
+ }
+ }
+}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
@@ -5122,6 +5147,10 @@ static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT:
pgmPoolTrackDerefPDPTEPT(pPool, pPage, (PEPTPDPT)pvShw);
break;
+
+ case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4:
+ pgmPoolTrackDerefNestedPML4(pPool, pPage, (PEPTPML4)pvShw);
+ break;
#endif
default:
diff --git a/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h b/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
index 85db8a47..efd44121 100644
--- a/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
+++ b/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
@@ -9428,6 +9428,8 @@ HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransien
/*
* Frequent exit or something needing probing. Call EMHistoryExec.
*/
+ int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL, IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
+ AssertRCReturn(rc2, rc2);
Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
diff --git a/src/VBox/VMM/VMMR0/HMVMXR0.cpp b/src/VBox/VMM/VMMR0/HMVMXR0.cpp
index 26b6252e..0bb3b490 100644
--- a/src/VBox/VMM/VMMR0/HMVMXR0.cpp
+++ b/src/VBox/VMM/VMMR0/HMVMXR0.cpp
@@ -382,13 +382,20 @@ static void hmR0VmxLazyLoadGuestMsrs(PVMCPUCC pVCpu)
}
else
{
- ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
- ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);
- ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);
- /* The system call flag mask register isn't as benign and accepting of all
- values as the above, so mask it to avoid #GP'ing on corrupted input. */
- Assert(!(pCtx->msrSFMASK & ~(uint64_t)UINT32_MAX));
- ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK & UINT32_MAX);
+ /* Avoid raising #GP caused by writing illegal values to these MSRs. */
+ if ( X86_IS_CANONICAL(pCtx->msrKERNELGSBASE)
+ && X86_IS_CANONICAL(pCtx->msrLSTAR))
+ {
+ ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
+ ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);
+ ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);
+ /* The system call flag mask register isn't as benign and accepting of all
+ values as the above, so mask it to avoid #GP'ing on corrupted input. */
+ Assert(!(pCtx->msrSFMASK & ~(uint64_t)UINT32_MAX));
+ ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK & UINT32_MAX);
+ }
+ else
+ AssertMsgFailed(("Incompatible lazily-loaded guest MSR values\n"));
}
}
pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
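
The rationale for the new guard: these MSRs are loaded with WRMSR in ring-0, where a #GP from a non-canonical base address hits the host itself rather than the guest. Validating first turns a potential host fault into an assertion. A sketch of the shape of such a guard, with a hypothetical wrmsr64() wrapper (the MSR index 0xC0000102 for KERNEL_GS_BASE is architectural):

#include <stdbool.h>
#include <stdint.h>

extern void wrmsr64(uint32_t idMsr, uint64_t uValue); /* hypothetical ring-0 wrapper */

/* Only load the guest value if it is canonical; otherwise report failure
   instead of letting the host take an unhandled #GP. */
static bool TryLoadKernelGsBase(uint64_t uValue)
{
    if (uValue + UINT64_C(0x800000000000) >= UINT64_C(0x1000000000000))
        return false;                        /* not canonical: would #GP */
    wrmsr64(UINT32_C(0xC0000102), uValue);   /* MSR_K8_KERNEL_GS_BASE */
    return true;
}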
diff --git a/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp b/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
index a2288250..04d8ac3c 100644
--- a/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
+++ b/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
@@ -2233,6 +2233,53 @@ static int cpumR3CpuIdSanitize(PVM pVM, PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
* 0x80000006 L2 cache information
*/
+ uSubLeaf = 0;
+ while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000006), uSubLeaf)) != NULL)
+ {
+ if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
+ || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
+ {
+ /*
+ * Some AMD CPUs (e.g. Ryzen 7940HS) report zero L3 cache line size here and refer
+ * to CPUID Fn8000_001D. This triggers division by zero in Linux if the
+ * TopologyExtensions aka TOPOEXT bit in Fn8000_0001_ECX is not set, or if the kernel
+ * is old enough (e.g. Linux 3.13) that it does not know about the topology extension
+ * CPUID leaves.
+ * We put a non-zero value in the cache line size here: if possible the actual
+ * value gleaned from Fn8000_001D, or, worst case, a made-up valid number.
+ */
+ PCPUMCPUIDLEAF pTopoLeaf;
+ uint32_t uTopoSubLeaf;
+ uint32_t uCacheLineSize;
+
+ if ((pCurLeaf->uEdx & 0xff) == 0)
+ {
+ uTopoSubLeaf = 0;
+
+ uCacheLineSize = 64; /* Use 64-byte line size as a fallback. */
+
+ /* Find L3 cache information. Have to check the cache level in EAX. */
+ while ((pTopoLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001d), uTopoSubLeaf)) != NULL)
+ {
+ if (((pTopoLeaf->uEax >> 5) & 0x07) == 3) {
+ uCacheLineSize = (pTopoLeaf->uEbx & 0xfff) + 1;
+ /* Fn8000_0006 can't report power of two line sizes greater than 128. */
+ if (uCacheLineSize > 128)
+ uCacheLineSize = 128;
+
+ break;
+ }
+ uTopoSubLeaf++;
+ }
+
+ Assert(uCacheLineSize < 256);
+ pCurLeaf->uEdx |= uCacheLineSize;
+ LogRel(("CPUM: AMD L3 cache line size in CPUID leaf 0x80000006 was zero, adjusting to %u\n", uCacheLineSize));
+ }
+ }
+ uSubLeaf++;
+ }
+
/* Cpuid 0x80000007: Advanced Power Management Information.
* AMD: EAX: Processor feedback capabilities.
* EBX: RAS capabilities.
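
The new block walks CPUID leaf 0x8000001D looking for the level-3 cache (EAX bits 7:5 equal to 3) and derives the line size from EBX bits 11:0 plus one, capped at 128 because leaf 0x80000006 cannot encode larger power-of-two line sizes in its 8-bit field. A self-contained sketch of the same walk, assuming a GCC/Clang build on x86 so that <cpuid.h>'s __get_cpuid_count is available:

#include <cpuid.h>    /* GCC/Clang CPUID wrapper */
#include <stdint.h>

/* Return a plausible L3 cache line size, falling back to the common
   64 bytes when no level-3 cache is reported, mirroring the fixup logic
   in the hunk above. */
static uint32_t GuessL3LineSize(void)
{
    for (unsigned uSub = 0; uSub < 16; uSub++)
    {
        unsigned uEax, uEbx, uEcx, uEdx;
        if (!__get_cpuid_count(0x8000001d, uSub, &uEax, &uEbx, &uEcx, &uEdx))
            break;
        if ((uEax & 0x1f) == 0)           /* cache type 0: end of the list */
            break;
        if (((uEax >> 5) & 0x7) == 3)     /* level-3 cache found */
        {
            uint32_t cbLine = (uEbx & 0xfff) + 1;
            return cbLine > 128 ? 128 : cbLine;  /* Fn8000_0006 8-bit limit */
        }
    }
    return 64;
}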
diff --git a/src/VBox/VMM/testcase/Instructions/itgTableDaa.py b/src/VBox/VMM/testcase/Instructions/itgTableDaa.py
index 2606d99b..2606d99b 100644..100755
--- a/src/VBox/VMM/testcase/Instructions/itgTableDaa.py
+++ b/src/VBox/VMM/testcase/Instructions/itgTableDaa.py
diff --git a/src/VBox/VMM/testcase/Instructions/itgTableDas.py b/src/VBox/VMM/testcase/Instructions/itgTableDas.py
index 96557cfd..96557cfd 100644..100755
--- a/src/VBox/VMM/testcase/Instructions/itgTableDas.py
+++ b/src/VBox/VMM/testcase/Instructions/itgTableDas.py
diff --git a/src/VBox/VMM/testcase/Makefile.kmk b/src/VBox/VMM/testcase/Makefile.kmk
index 43cb7b40..3028aaf1 100644
--- a/src/VBox/VMM/testcase/Makefile.kmk
+++ b/src/VBox/VMM/testcase/Makefile.kmk
@@ -800,6 +800,7 @@ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsHC.h: $(VBOX_VMM_TESTCASE_OUT_DIR)/tst
-e '/VMMCPU_size$$/d' \
-e '/SUPDRVTRACERUSRCTX32_size$$/d' \
-e '/HMCPU_size$$/d' \
+ -e '/SUPGLOBALINFOPAGE_size$$/d' \
\
-e '/^\(0x\)\{0,1\}00[0-9a-fA-F]* [aAnN] [^_.]*_size$$/!d' \
-e 's/^\(0x\)\{0,1\}\(00[0-9a-fA-F]*\) [aAnN] \([^_.]*\)_size/ CHECK_SIZE(\3, 0x0\2);/' \