path: root/src/VBox/Runtime/r0drv/solaris
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-27 14:19:18 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-27 14:19:18 +0000
commit 4035b1bfb1e5843a539a8b624d21952b756974d1 (patch)
tree f1e9cd5bf548cbc57ff2fddfb2b4aa9ae95587e2 /src/VBox/Runtime/r0drv/solaris
parent Initial commit. (diff)
Adding upstream version 6.1.22-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/Runtime/r0drv/solaris')
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/Makefile.kup 0
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c 66
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c 50
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c 206
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c 77
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c 339
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c 300
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c 1172
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h 322
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c 100
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c 79
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c 450
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c 139
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c 49
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c 347
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c 355
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h 496
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c 120
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c 387
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c 204
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h 234
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c 185
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c 150
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c 349
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c 70
-rw-r--r-- src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c 650
26 files changed, 6896 insertions, 0 deletions
diff --git a/src/VBox/Runtime/r0drv/solaris/Makefile.kup b/src/VBox/Runtime/r0drv/solaris/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c
new file mode 100644
index 00000000..46aa8aed
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c
@@ -0,0 +1,66 @@
+/* $Id: RTLogWriteDebugger-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Log To Debugger, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+
+
+
+RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
+{
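+ /* cmn_err() below is handed the buffer as a string, so assert that the caller passed a zero-terminated buffer together with its length. */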
+ if (pch[cb] != '\0')
+ AssertBreakpoint();
+
+ /* cmn_err() acquires adaptive mutexes. Not preemption safe, see @bugref{6657}. */
+ if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+ return;
+
+ if ( !g_frtSolSplSetsEIF
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ || ASMIntAreEnabled()
+#else
+/* PORTME: Check if interrupts are enabled, if applicable. */
+#endif
+ )
+ {
+ cmn_err(CE_CONT, pch);
+ }
+
+ return;
+}
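+
+/*
+ * Illustrative usage sketch (not part of the original file): callers are expected to pass a
+ * zero-terminated buffer together with its length, e.g.:
+ *
+ *     static const char s_szMsg[] = "vboxdrv: example message\n";
+ *     RTLogWriteDebugger(s_szMsg, sizeof(s_szMsg) - 1);
+ */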
+
diff --git a/src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c
new file mode 100644
index 00000000..8c16b1f6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c
@@ -0,0 +1,50 @@
+/* $Id: RTMpPokeCpu-r0drv-solaris.c $ */
+/** @file
+ * IPRT - RTMpPokeCpu, Solaris Implementation.
+ */
+
+/*
+ * Copyright (C) 2009-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mp.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+
+
+
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+ RT_ASSERT_INTS_ON();
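+ /* poke_cpu() interrupts the target CPU so it re-evaluates its state; ids beyond the configured CPU count are silently ignored. */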
+ if (idCpu < ncpus)
+ poke_cpu(idCpu);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c
new file mode 100644
index 00000000..0cfb60c8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c
@@ -0,0 +1,206 @@
+/* $Id: alloc-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/thread.h>
+#include "r0drv/alloc-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+static ddi_dma_attr_t s_rtR0SolDmaAttr =
+{
+ DMA_ATTR_V0, /* Version Number */
+ (uint64_t)0, /* Lower limit */
+ (uint64_t)0, /* High limit */
+ (uint64_t)0xffffffff, /* Counter limit */
+ (uint64_t)PAGESIZE, /* Alignment */
+ (uint64_t)PAGESIZE, /* Burst size */
+ (uint64_t)PAGESIZE, /* Effective DMA size */
+ (uint64_t)0xffffffff, /* Max DMA xfer size */
+ (uint64_t)0xffffffff, /* Segment boundary */
+ 1, /* Scatter-gather list length (1 for contiguous) */
+ 1, /* Device granularity */
+ 0 /* Bus-specific flags */
+};
+
+extern void *contig_alloc(size_t cb, ddi_dma_attr_t *pDmaAttr, size_t uAlign, int fCanSleep);
+
+
+/**
+ * OS specific allocation function.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+ size_t cbAllocated = cb;
+ PRTMEMHDR pHdr;
+
+#ifdef RT_ARCH_AMD64
+ if (fFlags & RTMEMHDR_FLAG_EXEC)
+ {
+ AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), VERR_NOT_SUPPORTED);
+ cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
+ pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
+ }
+ else
+#endif
+ {
+ unsigned fKmFlags = fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC ? KM_NOSLEEP : KM_SLEEP;
+ if (fFlags & RTMEMHDR_FLAG_ZEROED)
+ pHdr = (PRTMEMHDR)kmem_zalloc(cb + sizeof(*pHdr), fKmFlags);
+ else
+ pHdr = (PRTMEMHDR)kmem_alloc(cb + sizeof(*pHdr), fKmFlags);
+ }
+ if (RT_UNLIKELY(!pHdr))
+ {
+ LogRel(("rtMemAllocEx(%u, %#x) failed\n", (unsigned)(cb + sizeof(*pHdr)), fFlags));
+ return VERR_NO_MEMORY;
+ }
+
+ pHdr->u32Magic = RTMEMHDR_MAGIC;
+ pHdr->fFlags = fFlags;
+ pHdr->cb = cbAllocated;
+ pHdr->cbReq = cb;
+
+ *ppHdr = pHdr;
+ return VINF_SUCCESS;
+}
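+
+/*
+ * Layout sketch (illustrative only, assuming the generic r0drv frontend): the caller-visible
+ * block is expected to start right after the RTMEMHDR bookkeeping header allocated above:
+ *
+ *     PRTMEMHDR pHdr;
+ *     if (RT_SUCCESS(rtR0MemAllocEx(cb, 0, &pHdr)))
+ *     {
+ *         void *pvUser = pHdr + 1;    // cb usable bytes follow the header
+ *         ...
+ *         rtR0MemFree(pHdr);
+ *     }
+ */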
+
+
+/**
+ * OS specific free function.
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+ pHdr->u32Magic += 1;
+#ifdef RT_ARCH_AMD64
+ if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
+ segkmem_free(heaptext_arena, pHdr, pHdr->cb + sizeof(*pHdr));
+ else
+#endif
+ kmem_free(pHdr, pHdr->cb + sizeof(*pHdr));
+}
+
+
+/**
+ * Allocates physical memory which satisfies the given constraints.
+ *
+ * @param uPhysHi The upper physical address limit (inclusive).
+ * @param puPhys Where to store the physical address of the allocated
+ * memory. Optional, can be NULL.
+ * @param cb Size of allocation.
+ * @param uAlignment Alignment.
+ * @param fContig Whether the memory must be physically contiguous or
+ * not.
+ *
+ * @returns Virtual address of allocated memory block or NULL if allocation
+ * failed.
+ */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb, uint64_t uAlignment, bool fContig)
+{
+ if ((cb & PAGEOFFSET) != 0)
+ return NULL;
+
+ size_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
+ if (!cPages)
+ return NULL;
+
+ ddi_dma_attr_t DmaAttr = s_rtR0SolDmaAttr;
+ DmaAttr.dma_attr_addr_hi = uPhysHi;
+ DmaAttr.dma_attr_align = uAlignment;
+ if (!fContig)
+ DmaAttr.dma_attr_sgllen = cPages > INT_MAX ? INT_MAX - 1 : cPages;
+ else
+ AssertRelease(DmaAttr.dma_attr_sgllen == 1);
+
+ void *pvMem = contig_alloc(cb, &DmaAttr, PAGESIZE, 1 /* can sleep */);
+ if (!pvMem)
+ {
+ LogRel(("rtR0SolMemAlloc failed. cb=%u Align=%u fContig=%d\n", (unsigned)cb, (unsigned)uAlignment, fContig));
+ return NULL;
+ }
+
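+ /* Translate the kernel virtual address into a page frame number so the physical address can be reported. */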
+ pfn_t PageFrameNum = hat_getpfnum(kas.a_hat, (caddr_t)pvMem);
+ AssertRelease(PageFrameNum != PFN_INVALID);
+ if (puPhys)
+ *puPhys = (uint64_t)PageFrameNum << PAGESHIFT;
+
+ return pvMem;
+}
+
+
+/**
+ * Frees memory allocated using rtR0SolMemAlloc().
+ *
+ * @param pv The memory to free.
+ * @param cb Size of the memory block.
+ */
+DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb)
+{
+ if (RT_LIKELY(pv))
+ g_pfnrtR0Sol_contig_free(pv, cb);
+}
+
+
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+ AssertPtrReturn(pPhys, NULL);
+ AssertReturn(cb > 0, NULL);
+ RT_ASSERT_PREEMPTIBLE();
+
+ /* Allocate physically contiguous (< 4GB) page-aligned memory. */
+ uint64_t uPhys;
+ void *pvMem = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, cb, PAGESIZE, true /* fContig */);
+ if (RT_UNLIKELY(!pvMem))
+ {
+ LogRel(("RTMemContAlloc failed to allocate %u bytes\n", cb));
+ return NULL;
+ }
+
+ Assert(uPhys < _4G);
+ *pPhys = uPhys;
+ return pvMem;
+}
+
+
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+ RT_ASSERT_PREEMPTIBLE();
+ rtR0SolMemFree(pv, cb);
+}
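+
+/*
+ * Illustrative usage sketch (not part of the original file): pairing the contiguous allocation
+ * APIs for a hypothetical sub-4GB DMA buffer:
+ *
+ *     RTCCPHYS PhysAddr;
+ *     void    *pvBuf = RTMemContAlloc(&PhysAddr, cbBuf);
+ *     if (pvBuf)
+ *     {
+ *         ... hand PhysAddr to the device ...
+ *         RTMemContFree(pvBuf, cbBuf);
+ *     }
+ */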
+
diff --git a/src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c
new file mode 100644
index 00000000..0bab01c2
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c
@@ -0,0 +1,77 @@
+/* $Id: assert-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/assert.h>
+
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include <iprt/stdarg.h>
+#include <iprt/string.h>
+
+#include "internal/assert.h"
+
+
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+ uprintf("\r\n!!Assertion Failed!!\r\n"
+ "Expression: %s\r\n"
+ "Location : %s(%d) %s\r\n",
+ pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+ char szMsg[256];
+
+ RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
+ szMsg[sizeof(szMsg) - 1] = '\0';
+ uprintf("%s", szMsg);
+
+ NOREF(fInitial);
+}
+
+
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+ const char *psz = &g_szRTAssertMsg2[0];
+ const char *pszEnd = &g_szRTAssertMsg2[sizeof(g_szRTAssertMsg2)];
+ while (psz < pszEnd && (*psz == ' ' || *psz == '\t' || *psz == '\n' || *psz == '\r'))
+ psz++;
+
+ if (psz < pszEnd && *psz)
+ assfail(psz, g_pszRTAssertFile, g_u32RTAssertLine);
+ else
+ assfail(g_szRTAssertMsg1, g_pszRTAssertFile, g_u32RTAssertLine);
+ g_szRTAssertMsg2[0] = '\0';
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c
new file mode 100644
index 00000000..896d22d6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c
@@ -0,0 +1,339 @@
+/* $Id: dbgkrnlinfo-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Kernel debug information, Ring-0 Driver, Solaris Code.
+ */
+
+/*
+ * Copyright (C) 2012-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/dbg.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Solaris kernel debug info instance data.
+ */
+typedef struct RTDBGKRNLINFOINT
+{
+ /** Magic value (RTDBGKRNLINFO_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The number of threads referencing this object. */
+ uint32_t volatile cRefs;
+ /** Pointer to the genunix CTF handle. */
+ ctf_file_t *pGenUnixCTF;
+ /** Pointer to the genunix module handle. */
+ modctl_t *pGenUnixMod;
+} RTDBGKRNLINFOINT;
+/** Pointer to the solaris kernel debug info instance data. */
+typedef struct RTDBGKRNLINFOINT *PRTDBGKRNLINFOINT;
+
+
+/**
+ * Retains a kernel module and opens the CTF data associated with it.
+ *
+ * @param pszModule The name of the module to open.
+ * @param ppMod Where to store the module handle.
+ * @param ppCTF Where to store the module's CTF handle.
+ *
+ * @return IPRT status code.
+ */
+static int rtR0DbgKrnlInfoModRetain(char *pszModule, modctl_t **ppMod, ctf_file_t **ppCTF)
+{
+ AssertPtrReturn(pszModule, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppMod, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppCTF, VERR_INVALID_PARAMETER);
+
+ int rc = VINF_SUCCESS;
+ modid_t ModId = mod_name_to_modid(pszModule);
+ if (ModId != -1)
+ {
+ *ppMod = mod_hold_by_id(ModId);
+ if (*ppMod)
+ {
+ /*
+ * Hold mod_lock as ctf_modopen may update the module with uncompressed CTF data.
+ */
+ int err;
+ mutex_enter(&mod_lock);
+ *ppCTF = ctf_modopen(((modctl_t *)*ppMod)->mod_mp, &err);
+ mutex_exit(&mod_lock);
+ mod_release_mod(*ppMod);
+
+ if (*ppCTF)
+ return VINF_SUCCESS;
+ else
+ {
+ LogRel(("rtR0DbgKrnlInfoModRetain: ctf_modopen failed for '%s' err=%d\n", pszModule, err));
+ rc = VERR_INTERNAL_ERROR_3;
+ }
+ }
+ else
+ {
+ LogRel(("rtR0DbgKrnlInfoModRetain: mod_hold_by_id failed for '%s'\n", pszModule));
+ rc = VERR_INTERNAL_ERROR_2;
+ }
+ }
+ else
+ {
+ LogRel(("rtR0DbgKrnlInfoModRetain: mod_name_to_modid failed for '%s'\n", pszModule));
+ rc = VERR_INTERNAL_ERROR;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Releases the kernel module and closes its CTF data.
+ *
+ * @param pMod Pointer to the module handle.
+ * @param pCTF Pointer to the module's CTF handle.
+ */
+static void rtR0DbgKrnlInfoModRelease(modctl_t *pMod, ctf_file_t *pCTF)
+{
+ AssertPtrReturnVoid(pMod);
+ AssertPtrReturnVoid(pCTF);
+
+ ctf_close(pCTF);
+}
+
+
+/**
+ * Helper for opening the specified kernel module.
+ *
+ * @param pszModule The name of the module.
+ * @param ppMod Where to store the module handle.
+ * @param ppCtf Where to store the module's CTF handle.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0DbgKrnlInfoModRetainEx(const char *pszModule, modctl_t **ppMod, ctf_file_t **ppCtf)
+{
+ char *pszMod = RTStrDup(pszModule);
+ if (RT_LIKELY(pszMod))
+ {
+ int rc = rtR0DbgKrnlInfoModRetain(pszMod, ppMod, ppCtf);
+ RTStrFree(pszMod);
+ if (RT_SUCCESS(rc))
+ {
+ AssertPtrReturn(*ppMod, VERR_INTERNAL_ERROR_2);
+ AssertPtrReturn(*ppCtf, VERR_INTERNAL_ERROR_3);
+ }
+ return rc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoOpen(PRTDBGKRNLINFO phKrnlInfo, uint32_t fFlags)
+{
+ AssertReturn(fFlags == 0, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phKrnlInfo, VERR_INVALID_POINTER);
+ /* This can be called as part of IPRT init, in which case we have no thread preempt information yet. */
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ *phKrnlInfo = NIL_RTDBGKRNLINFO;
+ PRTDBGKRNLINFOINT pThis = (PRTDBGKRNLINFOINT)RTMemAllocZ(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ char szGenUnixModName[] = "genunix";
+ int rc = rtR0DbgKrnlInfoModRetain(szGenUnixModName, &pThis->pGenUnixMod, &pThis->pGenUnixCTF);
+ if (RT_SUCCESS(rc))
+ {
+ pThis->u32Magic = RTDBGKRNLINFO_MAGIC;
+ pThis->cRefs = 1;
+
+ *phKrnlInfo = pThis;
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("RTR0DbgKrnlInfoOpen: rtR0DbgKrnlInfoModRetain failed rc=%d.\n", rc));
+ RTMemFree(pThis);
+ return rc;
+}
+
+
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRetain(RTDBGKRNLINFO hKrnlInfo)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, UINT32_MAX);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000);
+ return cRefs;
+}
+
+
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRelease(RTDBGKRNLINFO hKrnlInfo)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ if (pThis == NIL_RTDBGKRNLINFO)
+ return 0;
+ AssertPtrReturn(pThis, UINT32_MAX);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ if (cRefs == 0)
+ {
+ pThis->u32Magic = ~RTDBGKRNLINFO_MAGIC;
+ rtR0DbgKrnlInfoModRelease(pThis->pGenUnixMod, pThis->pGenUnixCTF);
+ RTMemFree(pThis);
+ }
+ return cRefs;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoQueryMember(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszStructure,
+ const char *pszMember, size_t *poffMember)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertPtrReturn(pszMember, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszStructure, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(poffMember, VERR_INVALID_PARAMETER);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ ctf_file_t *pCtf = NULL;
+ modctl_t *pMod = NULL;
+ if (!pszModule)
+ {
+ pCtf = pThis->pGenUnixCTF;
+ pMod = pThis->pGenUnixMod;
+ }
+ else
+ {
+ int rc2 = rtR0DbgKrnlInfoModRetainEx(pszModule, &pMod, &pCtf);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ Assert(pMod);
+ Assert(pCtf);
+ }
+
+ int rc = VERR_NOT_FOUND;
+ ctf_id_t TypeIdent = ctf_lookup_by_name(pCtf, pszStructure);
+ if (TypeIdent != CTF_ERR)
+ {
+ ctf_membinfo_t MemberInfo;
+ RT_ZERO(MemberInfo);
+ if (ctf_member_info(pCtf, TypeIdent, pszMember, &MemberInfo) != CTF_ERR)
+ {
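+ /* CTF reports member offsets in bits; convert to a byte offset for the caller. */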
+ *poffMember = (MemberInfo.ctm_offset >> 3);
+ rc = VINF_SUCCESS;
+ }
+ }
+
+ if (pszModule)
+ rtR0DbgKrnlInfoModRelease(pMod, pCtf);
+ return rc;
+}
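+
+/*
+ * Illustrative usage sketch (mirrors what rtR0InitNative() in initterm-r0drv-solaris.c does):
+ *
+ *     size_t offPreempt;
+ *     int rc = RTR0DbgKrnlInfoQueryMember(hKrnlInfo, NULL, "kthread_t", "t_preempt", &offPreempt);
+ *     if (RT_SUCCESS(rc))
+ *         ... offPreempt now holds the byte offset of t_preempt within the thread structure ...
+ */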
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySymbol(RTDBGKRNLINFO hKrnlInfo, const char *pszModule,
+ const char *pszSymbol, void **ppvSymbol)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertPtrReturn(pszSymbol, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(ppvSymbol, VERR_INVALID_PARAMETER);
+ AssertReturn(!pszModule, VERR_MODULE_NOT_FOUND);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ uintptr_t uValue = kobj_getsymvalue((char *)pszSymbol, 1 /* only kernel */);
+ if (ppvSymbol)
+ *ppvSymbol = (void *)uValue;
+ if (uValue)
+ return VINF_SUCCESS;
+ return VERR_SYMBOL_NOT_FOUND;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySize(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszType, size_t *pcbType)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pcbType, VERR_INVALID_PARAMETER);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ modctl_t *pMod = NULL;
+ ctf_file_t *pCtf = NULL;
+ if (!pszModule)
+ {
+ pCtf = pThis->pGenUnixCTF;
+ pMod = pThis->pGenUnixMod;
+ }
+ else
+ {
+ int rc2 = rtR0DbgKrnlInfoModRetainEx(pszModule, &pMod, &pCtf);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ Assert(pMod);
+ Assert(pCtf);
+ }
+
+ int rc = VERR_NOT_FOUND;
+ ctf_id_t TypeIdent = ctf_lookup_by_name(pCtf, pszType);
+ if (TypeIdent != CTF_ERR)
+ {
+ ssize_t cbType = ctf_type_size(pCtf, TypeIdent);
+ if (cbType > 0)
+ {
+ *pcbType = cbType;
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_WRONG_TYPE;
+ }
+
+ if (pszModule)
+ rtR0DbgKrnlInfoModRelease(pMod, pCtf);
+ return rc;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
new file mode 100644
index 00000000..e8dd270c
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
@@ -0,0 +1,300 @@
+/* $Id: initterm-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Initialization & Termination, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include "internal/initterm.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Kernel debug info handle. */
+RTDBGKRNLINFO g_hKrnlDbgInfo;
+/** Indicates that the spl routines (and therefore a bunch of other ones too)
+ * will set EFLAGS::IF and break code that disables interrupts. */
+bool g_frtSolSplSetsEIF = false;
+/** timeout_generic address. */
+PFNSOL_timeout_generic g_pfnrtR0Sol_timeout_generic = NULL;
+/** untimeout_generic address. */
+PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic = NULL;
+/** cyclic_reprogram address. */
+PFNSOL_cyclic_reprogram g_pfnrtR0Sol_cyclic_reprogram = NULL;
+/** page_noreloc_supported address. */
+PFNSOL_page_noreloc_supported g_pfnrtR0Sol_page_noreloc_supported = NULL;
+/** Whether to use the kernel page freelist. */
+bool g_frtSolUseKflt = false;
+/** Whether we've completed R0 initialization. */
+bool g_frtSolInitDone = false;
+/** Whether to use old-style xc_call interface. */
+bool g_frtSolOldIPI = false;
+/** Whether to use old-style xc_call interface using one ulong_t as the CPU set
+ * representation. */
+bool g_frtSolOldIPIUlong = false;
+/** The xc_call callout table structure. */
+RTR0FNSOLXCCALL g_rtSolXcCall;
+/** Whether to use the old-style installctx()/removectx() routines. */
+bool g_frtSolOldThreadCtx = false;
+/** The thread-context hooks callout table structure. */
+RTR0FNSOLTHREADCTX g_rtSolThreadCtx;
+/** Thread preemption offset in the thread structure. */
+size_t g_offrtSolThreadPreempt;
+/** Thread ID offset in the thread structure. */
+size_t g_offrtSolThreadId;
+/** The interrupt (pinned) thread pointer offset in the thread structure. */
+size_t g_offrtSolThreadIntrThread;
+/** The dispatcher lock pointer offset in the thread structure. */
+size_t g_offrtSolThreadLock;
+/** The process pointer offset in the thread structure. */
+size_t g_offrtSolThreadProc;
+/** Host scheduler preemption offset. */
+size_t g_offrtSolCpuPreempt;
+/** Host scheduler force preemption offset. */
+size_t g_offrtSolCpuForceKernelPreempt;
+/** Whether to use the old-style map_addr() routine. */
+bool g_frtSolOldMapAddr = false;
+/** The map_addr() hooks callout table structure. */
+RTR0FNSOLMAPADDR g_rtSolMapAddr;
+/* Resolve using dl_lookup (remove if no longer relevant for supported S10 versions) */
+extern void contig_free(void *addr, size_t size);
+#pragma weak contig_free
+/** contig_free address. */
+PFNSOL_contig_free g_pfnrtR0Sol_contig_free = contig_free;
+
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+ /*
+ * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
+ */
+ int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
+ if (RT_SUCCESS(rc))
+ {
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ /*
+ * Detect whether spl*() is preserving the interrupt flag or not.
+ * This is a problem on S10.
+ */
+ RTCCUINTREG uOldFlags = ASMIntDisableFlags();
+ int iOld = splr(DISP_LEVEL);
+ if (ASMIntAreEnabled())
+ g_frtSolSplSetsEIF = true;
+ splx(iOld);
+ if (ASMIntAreEnabled())
+ g_frtSolSplSetsEIF = true;
+ ASMSetFlags(uOldFlags);
+#else
+ /* PORTME: See if the amd64/x86 problem applies to this architecture. */
+#endif
+ /*
+ * Mandatory: Preemption offsets.
+ */
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_did", &g_offrtSolThreadId);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_did!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_intr", &g_offrtSolThreadIntrThread);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_intr!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_lockp", &g_offrtSolThreadLock);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_lockp!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_procp", &g_offrtSolThreadProc);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_procp!\n");
+ goto errorbail;
+ }
+ cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n", g_offrtSolCpuPreempt, g_offrtSolCpuPreempt);
+ cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n", g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt);
+ cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt);
+ cmn_err(CE_CONT, "!kthread_t::t_did @ 0x%lx (%ld)\n", g_offrtSolThreadId, g_offrtSolThreadId);
+ cmn_err(CE_CONT, "!kthread_t::t_intr @ 0x%lx (%ld)\n", g_offrtSolThreadIntrThread, g_offrtSolThreadIntrThread);
+ cmn_err(CE_CONT, "!kthread_t::t_lockp @ 0x%lx (%ld)\n", g_offrtSolThreadLock, g_offrtSolThreadLock);
+ cmn_err(CE_CONT, "!kthread_t::t_procp @ 0x%lx (%ld)\n", g_offrtSolThreadProc, g_offrtSolThreadProc);
+
+ /*
+ * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details.
+ */
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
+ if (RT_SUCCESS(rc))
+ {
+ if (ncpus > IPRT_SOL_NCPUS)
+ {
+ cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
+ rc = VERR_NOT_SUPPORTED;
+ goto errorbail;
+ }
+ g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
+ }
+ else
+ {
+ g_frtSolOldIPI = true;
+ g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
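+ /* The oldest xc_call interface takes the CPU set as a single ulong_t bitmask, which only works
+ while the highest CPU id fits in one machine word; see the-solaris-kernel.h for the variants. */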
+ if (max_cpuid + 1 == sizeof(ulong_t) * 8)
+ {
+ g_frtSolOldIPIUlong = true;
+ g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
+ }
+ else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
+ {
+ cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid,
+ IPRT_SOL_NCPUS);
+ rc = VERR_NOT_SUPPORTED;
+ goto errorbail;
+ }
+ }
+
+ /*
+ * Mandatory: Thread-context hooks.
+ */
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx", NULL /* ppvSymbol */);
+ if (RT_SUCCESS(rc))
+ {
+ g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx;
+ g_rtSolThreadCtx.Remove.pfnSol_removectx = (void *)removectx;
+ }
+ else
+ {
+ g_frtSolOldThreadCtx = true;
+ g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx;
+ g_rtSolThreadCtx.Remove.pfnSol_removectx_old = (void *)removectx;
+ }
+
+ /*
+ * Mandatory: map_addr() hooks.
+ */
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "plat_map_align_amount", NULL /* ppvSymbol */);
+ if (RT_SUCCESS(rc))
+ {
+ g_rtSolMapAddr.u.pfnSol_map_addr = (void *)map_addr;
+ }
+ else
+ {
+ g_frtSolOldMapAddr = true;
+ g_rtSolMapAddr.u.pfnSol_map_addr_old = (void *)map_addr;
+ }
+
+ /*
+ * Optional: Timeout hooks.
+ */
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",
+ (void **)&g_pfnrtR0Sol_timeout_generic);
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic",
+ (void **)&g_pfnrtR0Sol_untimeout_generic);
+ if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
+ {
+ static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" };
+ bool iMissingFn = g_pfnrtR0Sol_timeout_generic == NULL;
+ cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]);
+ g_pfnrtR0Sol_timeout_generic = NULL;
+ g_pfnrtR0Sol_untimeout_generic = NULL;
+ }
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram",
+ (void **)&g_pfnrtR0Sol_cyclic_reprogram);
+
+ /*
+ * Optional: Querying page no-relocation support.
+ */
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported",
+ (void **)&g_pfnrtR0Sol_page_noreloc_supported);
+
+ /*
+ * Weak binding failures: contig_free
+ */
+ if (g_pfnrtR0Sol_contig_free == NULL)
+ {
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free",
+ (void **)&g_pfnrtR0Sol_contig_free);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
+ goto errorbail;
+ }
+ }
+
+ g_frtSolInitDone = true;
+ return VINF_SUCCESS;
+ }
+ else
+ {
+ cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
+ return rc;
+ }
+
+errorbail:
+ RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+ return rc;
+}
+
+
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+ RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+ g_frtSolInitDone = false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
new file mode 100644
index 00000000..ebf2eae2
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
@@ -0,0 +1,1172 @@
+/* $Id: memobj-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/memobj.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+#include "memobj-r0drv-solaris.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define SOL_IS_KRNL_ADDR(vx) ((uintptr_t)(vx) >= kernelbase)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The Solaris version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJSOL
+{
+ /** The core structure. */
+ RTR0MEMOBJINTERNAL Core;
+ /** Pointer to kernel memory cookie. */
+ ddi_umem_cookie_t Cookie;
+ /** Shadow locked pages. */
+ void *pvHandle;
+ /** Access during locking. */
+ int fAccess;
+ /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
+ * allocation. */
+ bool fLargePage;
+ /** Whether we have individual pages or a kernel-mapped virtual memory block in
+ * an RTR0MEMOBJTYPE_PHYS_NC allocation. */
+ bool fIndivPages;
+} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static vnode_t g_PageVnode;
+static kmutex_t g_OffsetMtx;
+static u_offset_t g_offPage;
+
+static vnode_t g_LargePageVnode;
+static kmutex_t g_LargePageOffsetMtx;
+static u_offset_t g_offLargePage;
+static bool g_fLargePageNoReloc;
+
+
+/**
+ * Returns the physical address for a virtual address.
+ *
+ * @param pv The virtual address.
+ *
+ * @returns The physical address corresponding to @a pv.
+ */
+static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
+{
+ struct hat *pHat = NULL;
+ pfn_t PageFrameNum = 0;
+ uintptr_t uVirtAddr = (uintptr_t)pv;
+
+ if (SOL_IS_KRNL_ADDR(pv))
+ pHat = kas.a_hat;
+ else
+ {
+ proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+ AssertRelease(pProcess);
+ pHat = pProcess->p_as->a_hat;
+ }
+
+ PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
+ AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
+ return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
+}
+
+
+/**
+ * Returns the physical address for a page.
+ *
+ * @param pPage Pointer to the page.
+ *
+ * @returns The physical address for a page.
+ */
+static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
+{
+ AssertPtr(pPage);
+ pfn_t PageFrameNum = page_pptonum(pPage);
+ AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
+ return (uint64_t)PageFrameNum << PAGE_SHIFT;
+}
+
+
+/**
+ * Allocates one page.
+ *
+ * @param virtAddr The virtual address to which this page may be mapped in
+ * the future.
+ *
+ * @returns Pointer to the allocated page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
+{
+ u_offset_t offPage;
+ seg_t KernelSeg;
+
+ /*
+ * 16777215 terabytes of total memory for all VMs or
+ * restart 8000 1GB VMs 2147483 times until wraparound!
+ */
+ mutex_enter(&g_OffsetMtx);
+ AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
+ g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
+ offPage = g_offPage;
+ mutex_exit(&g_OffsetMtx);
+
+ KernelSeg.s_as = &kas;
+ page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
+ if (RT_LIKELY(pPage))
+ {
+ /*
+ * Lock this page into memory "long term" to prevent this page from being paged out
+ * when we drop the page lock temporarily (during free). Downgrade to a shared lock
+ * to prevent page relocation.
+ */
+ page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
+ page_io_unlock(pPage);
+ page_downgrade(pPage);
+ Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
+ }
+
+ return pPage;
+}
+
+
+/**
+ * Destroys an allocated page.
+ *
+ * @param pPage Pointer to the page to be destroyed.
+ * @remarks This function expects the page @c pPage to be shared locked.
+ */
+static void rtR0MemObjSolPageDestroy(page_t *pPage)
+{
+ /*
+ * We need to exclusive lock the pages before freeing them, if upgrading the shared lock to exclusive fails,
+ * drop the page lock and look it up from the hash. Record the page offset before we drop the page lock as
+ * we cannot touch any page_t members once the lock is dropped.
+ */
+ AssertPtr(pPage);
+ Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
+
+ u_offset_t offPage = pPage->p_offset;
+ int rc = page_tryupgrade(pPage);
+ if (!rc)
+ {
+ page_unlock(pPage);
+ page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);
+
+ /*
+ * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
+ */
+ AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
+ &g_PageVnode, offPage, pFoundPage, pPage));
+ }
+ Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
+ page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
+ page_destroy(pPage, 0 /* move it to the free list */);
+}
+
+
+/* Currently not used on 32-bits, define it to shut up gcc. */
+#if HC_ARCH_BITS == 64
+/**
+ * Allocates physical, non-contiguous memory of pages.
+ *
+ * @param puPhys Where to store the physical address of first page. Optional,
+ * can be NULL.
+ * @param cb The size of the allocation.
+ *
+ * @return Array of allocated pages, NULL on failure.
+ */
+static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
+{
+ /*
+ * VM1:
+ * The page freelist and cachelist both hold pages that are not mapped into any address space.
+ * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
+ * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
+ *
+ * VM2:
+ * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
+ */
+
+ /*
+ * Non-pageable memory reservation request for _4K pages, don't sleep.
+ */
+ size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int rc = page_resv(cPages, KM_NOSLEEP);
+ if (rc)
+ {
+ size_t cbPages = cPages * sizeof(page_t *);
+ page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
+ if (RT_LIKELY(ppPages))
+ {
+ /*
+ * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
+ * we don't yet have the 'virtAddr' to which this memory may be mapped.
+ */
+ caddr_t virtAddr = 0;
+ for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
+ {
+ /*
+ * Get a page from the free list locked exclusively. The page will be named (hashed in)
+ * and we rely on it during free. The page we get will be shared locked to prevent the page
+ * from being relocated.
+ */
+ page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
+ if (RT_UNLIKELY(!pPage))
+ {
+ /*
+ * No page found, release whatever pages we grabbed so far.
+ */
+ for (size_t k = 0; k < i; k++)
+ rtR0MemObjSolPageDestroy(ppPages[k]);
+ kmem_free(ppPages, cbPages);
+ page_unresv(cPages);
+ return NULL;
+ }
+
+ ppPages[i] = pPage;
+ }
+
+ if (puPhys)
+ *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
+ return ppPages;
+ }
+
+ page_unresv(cPages);
+ }
+
+ return NULL;
+}
+#endif /* HC_ARCH_BITS == 64 */
+
+
+/**
+ * Frees the allocated pages.
+ *
+ * @param ppPages Pointer to the page list.
+ * @param cb Size of the allocation.
+ */
+static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
+{
+ size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t cbPages = cPages * sizeof(page_t *);
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ rtR0MemObjSolPageDestroy(ppPages[iPage]);
+
+ kmem_free(ppPages, cbPages);
+ page_unresv(cPages);
+}
+
+
+/**
+ * Allocates one large page.
+ *
+ * @param puPhys Where to store the physical address of the allocated
+ * page. Optional, can be NULL.
+ * @param cbLargePage Size of the large page.
+ *
+ * @returns Pointer to a list of pages that cover the large page, NULL on
+ * failure.
+ */
+static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
+{
+ /*
+ * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
+ * fragmentation on systems that support it.
+ */
+ static bool fPageNoRelocChecked = false;
+ if (fPageNoRelocChecked == false)
+ {
+ fPageNoRelocChecked = true;
+ g_fLargePageNoReloc = false;
+ if ( g_pfnrtR0Sol_page_noreloc_supported
+ && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
+ {
+ g_fLargePageNoReloc = true;
+ }
+ }
+
+ /*
+ * Non-pageable memory reservation request for _4K pages, don't sleep.
+ */
+ size_t cPages = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t cbPages = cPages * sizeof(page_t *);
+ u_offset_t offPage = 0;
+ int rc = page_resv(cPages, KM_NOSLEEP);
+ if (rc)
+ {
+ page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
+ if (RT_LIKELY(ppPages))
+ {
+ mutex_enter(&g_LargePageOffsetMtx);
+ AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
+ g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
+ offPage = g_offLargePage;
+ mutex_exit(&g_LargePageOffsetMtx);
+
+ seg_t KernelSeg;
+ KernelSeg.s_as = &kas;
+ page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
+ PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
+ 0 /* vaddr */, NULL /* locality group */);
+ if (pRootPage)
+ {
+ /*
+ * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
+ */
+ page_t *pPageList = pRootPage;
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ page_t *pPage = pPageList;
+ AssertPtr(pPage);
+ AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+ ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+ AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
+ (int)pPage->p_szc, (int)pRootPage->p_szc));
+
+ /*
+ * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
+ * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
+ * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
+ * page_resv().
+ */
+ page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
+
+ page_sub(&pPageList, pPage);
+ page_io_unlock(pPage);
+ page_downgrade(pPage);
+ Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
+
+ ppPages[iPage] = pPage;
+ }
+ Assert(pPageList == NULL);
+ Assert(ppPages[0] == pRootPage);
+
+ uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
+ AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
+ if (puPhys)
+ *puPhys = uPhys;
+ return ppPages;
+ }
+
+ /*
+ * Don't restore offPrev in case of failure (race condition), we have plenty of offset space.
+ * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
+ */
+ kmem_free(ppPages, cbPages);
+ }
+
+ page_unresv(cPages);
+ }
+ return NULL;
+}
+
+
+/**
+ * Frees the large page.
+ *
+ * @param ppPages Pointer to the list of small pages that cover the
+ * large page.
+ * @param cbLargePage Size of the allocation (i.e. size of the large
+ * page).
+ */
+static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
+{
+ Assert(ppPages);
+ Assert(cbLargePage > PAGE_SIZE);
+
+ bool fDemoted = false;
+ size_t cPages = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t cbPages = cPages * sizeof(page_t *);
+ page_t *pPageList = ppPages[0];
+
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ /*
+ * We need the pages exclusively locked, try upgrading the shared lock.
+ * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
+ * and lookup the page from the page hash locking it exclusively.
+ */
+ page_t *pPage = ppPages[iPage];
+ u_offset_t offPage = pPage->p_offset;
+ int rc = page_tryupgrade(pPage);
+ if (!rc)
+ {
+ page_unlock(pPage);
+ page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
+ AssertRelease(pFoundPage);
+
+ if (g_fLargePageNoReloc)
+ {
+ /*
+ * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
+ */
+ AssertReleaseMsg(pFoundPage == pPage,
+ ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
+ pFoundPage, pPage));
+ }
+
+ /*
+ * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
+ * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
+ */
+ if (page_get_pagecnt(pFoundPage->p_szc) == 1) /* Base size of only _4K associated with this page. */
+ fDemoted = true;
+ pPage = pFoundPage;
+ ppPages[iPage] = pFoundPage;
+ }
+ Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
+ page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
+ }
+
+ if (fDemoted)
+ {
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
+ page_destroy(ppPages[iPage], 0 /* move it to the free list */);
+ }
+ }
+ else
+ {
+ /*
+ * Although we shred the adjacent pages in the linked list, page_destroy_pages works on
+ * adjacent pages via array increments. So this does indeed free all the pages.
+ */
+ AssertPtr(pPageList);
+ page_destroy_pages(pPageList);
+ }
+ kmem_free(ppPages, cbPages);
+ page_unresv(cPages);
+}
+
+
+/**
+ * Unmaps kernel/user-space mapped memory.
+ *
+ * @param pv Pointer to the mapped memory block.
+ * @param cb Size of the memory block.
+ */
+static void rtR0MemObjSolUnmap(void *pv, size_t cb)
+{
+ if (SOL_IS_KRNL_ADDR(pv))
+ {
+ hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
+ vmem_free(heap_arena, pv, cb);
+ }
+ else
+ {
+ struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+ AssertPtr(pAddrSpace);
+ as_rangelock(pAddrSpace);
+ as_unmap(pAddrSpace, pv, cb);
+ as_rangeunlock(pAddrSpace);
+ }
+}
+
+
+/**
+ * Lock down memory mappings for a virtual address.
+ *
+ * @param pv Pointer to the memory to lock down.
+ * @param cb Size of the memory block.
+ * @param fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
+{
+ /*
+ * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
+ */
+ if (!SOL_IS_KRNL_ADDR(pv))
+ {
+ proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
+ AssertPtr(pProc);
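+ /* F_SOFTLOCK faults the user pages in and keeps them locked until the matching F_SOFTUNLOCK in rtR0MemObjSolUnlock(). */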
+ faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
+ if (rc)
+ {
+ LogRel(("rtR0MemObjSolLock failed for pv=%pv cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
+ return VERR_LOCK_FAILED;
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Unlock memory mappings for a virtual address.
+ *
+ * @param pv Pointer to the locked memory.
+ * @param cb Size of the memory block.
+ * @param fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
+ */
+static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
+{
+ if (!SOL_IS_KRNL_ADDR(pv))
+ {
+ proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+ AssertPtr(pProcess);
+ as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
+ }
+}
+
+
+/**
+ * Maps a list of physical pages into user address space.
+ *
+ * @param pVirtAddr Where to store the virtual address of the mapping.
+ * @param fPageAccess Page access rights (PROT_READ, PROT_WRITE,
+ * PROT_EXEC)
+ * @param paPhysAddrs Array of physical addresses to pages.
+ * @param cb Size of memory being mapped.
+ * @param cbPageSize Page size used for the mapping.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
+{
+ struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+ int rc;
+ SEGVBOX_CRARGS Args;
+
+ Args.paPhysAddrs = paPhysAddrs;
+ Args.fPageAccess = fPageAccess;
+ Args.cbPageSize = cbPageSize;
+
+ as_rangelock(pAddrSpace);
+ if (g_frtSolOldMapAddr)
+ g_rtSolMapAddr.u.pfnSol_map_addr_old(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
+ else
+ g_rtSolMapAddr.u.pfnSol_map_addr(pVirtAddr, cb, 0 /* offset */, MAP_SHARED);
+ if (*pVirtAddr != NULL)
+ rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
+ else
+ rc = ENOMEM;
+ as_rangeunlock(pAddrSpace);
+
+ return RTErrConvertFromErrno(rc);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
+
+ switch (pMemSolaris->Core.enmType)
+ {
+ case RTR0MEMOBJTYPE_LOW:
+ rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ if (pMemSolaris->Core.u.Phys.fAllocated)
+ {
+ if (pMemSolaris->fLargePage)
+ rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+ else
+ rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ }
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ if (pMemSolaris->fIndivPages)
+ rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+ else
+ rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ break;
+
+ case RTR0MEMOBJTYPE_PAGE:
+ ddi_umem_free(pMemSolaris->Cookie);
+ break;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
+ break;
+
+ case RTR0MEMOBJTYPE_MAPPING:
+ rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ break;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ {
+ if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
+ vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ else
+ AssertFailed();
+ break;
+ }
+
+ case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
+ return VERR_INTERNAL_ERROR;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ /* Create the object. */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
+ if (RT_UNLIKELY(!pMemSolaris))
+ return VERR_NO_MEMORY;
+
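+ /* ddi_umem_alloc returns zero-filled, page-aligned kernel memory together with a cookie that must later be passed to ddi_umem_free. */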
+ void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
+ if (RT_UNLIKELY(!pvMem))
+ {
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_PAGE_MEMORY;
+ }
+
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->pvHandle = NULL;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+
+ /* Create the object */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
+ if (!pMemSolaris)
+ return VERR_NO_MEMORY;
+
+ /* Allocate physically low page-aligned memory. */
+ uint64_t uPhysHi = _4G - 1;
+ void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
+ if (RT_UNLIKELY(!pvMem))
+ {
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_LOW_MEMORY;
+ }
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->pvHandle = NULL;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+ return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+#if HC_ARCH_BITS == 64
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
+ if (RT_UNLIKELY(!pMemSolaris))
+ return VERR_NO_MEMORY;
+
+ if (PhysHighest == NIL_RTHCPHYS)
+ {
+ uint64_t PhysAddr = UINT64_MAX;
+ void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
+ if (!pvPages)
+ {
+ LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_MEMORY;
+ }
+ Assert(PhysAddr != UINT64_MAX);
+ Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+
+ pMemSolaris->Core.pv = NULL;
+ pMemSolaris->pvHandle = pvPages;
+ pMemSolaris->fIndivPages = true;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+ else
+ {
+ /*
+ * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
+ * We fall back to using contig_alloc().
+ */
+ uint64_t PhysAddr = UINT64_MAX;
+ void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
+ if (!pvMem)
+ {
+ LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_MEMORY;
+ }
+ Assert(PhysAddr != UINT64_MAX);
+ Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->pvHandle = NULL;
+ pMemSolaris->fIndivPages = false;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+
+#else /* 32 bit: */
+ return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
+#endif
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+ AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
+
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+ if (RT_UNLIKELY(!pMemSolaris))
+ return VERR_NO_MEMORY;
+
+ /*
+ * Allocating one large page gets special treatment.
+ */
+ static uint32_t s_cbLargePage = UINT32_MAX;
+ if (s_cbLargePage == UINT32_MAX)
+ {
+ if (page_num_pagesizes() > 1)
+ ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
+ else
+ ASMAtomicWriteU32(&s_cbLargePage, 0);
+ }
+
+ uint64_t PhysAddr;
+ if ( cb == s_cbLargePage
+ && cb == uAlignment
+ && PhysHighest == NIL_RTHCPHYS)
+ {
+ /*
+ * Allocate one large page (backed by physically contiguous memory).
+ */
+ void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
+ if (RT_LIKELY(pvPages))
+ {
+ AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
+ pMemSolaris->Core.pv = NULL;
+ pMemSolaris->Core.u.Phys.PhysBase = PhysAddr;
+ pMemSolaris->Core.u.Phys.fAllocated = true;
+ pMemSolaris->pvHandle = pvPages;
+ pMemSolaris->fLargePage = true;
+
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+ }
+ else
+ {
+ /*
+ * Allocate physically contiguous memory aligned as specified.
+ */
+ AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
+ PhysAddr = PhysHighest;
+ void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
+ if (RT_LIKELY(pvMem))
+ {
+ Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+ Assert(PhysAddr < PhysHighest);
+ Assert(PhysAddr + cb <= PhysHighest);
+
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->Core.u.Phys.PhysBase = PhysAddr;
+ pMemSolaris->Core.u.Phys.fAllocated = true;
+ pMemSolaris->pvHandle = NULL;
+ pMemSolaris->fLargePage = false;
+
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+ }
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_CONT_MEMORY;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+ AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
+
+ /* Create the object. */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+ if (!pMemSolaris)
+ return VERR_NO_MEMORY;
+
+ /* There is no allocation here, it needs to be mapped somewhere first. */
+ pMemSolaris->Core.u.Phys.fAllocated = false;
+ pMemSolaris->Core.u.Phys.PhysBase = Phys;
+ pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
+ RTR0PROCESS R0Process)
+{
+ AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
+ NOREF(fAccess);
+
+ /* Create the locking object */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+ if (!pMemSolaris)
+ return VERR_NO_MEMORY;
+
+ /* Lock down user pages. */
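+ /* Note: S_READ/S_WRITE/S_EXEC are seg_rw enum values rather than bit flags, so the strongest requested access wins. */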
+ int fPageAccess = S_READ;
+ if (fAccess & RTMEM_PROT_WRITE)
+ fPageAccess = S_WRITE;
+ if (fAccess & RTMEM_PROT_EXEC)
+ fPageAccess = S_EXEC;
+ int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return rc;
+ }
+
+ /* Fill in the object attributes and return successfully. */
+ pMemSolaris->Core.u.Lock.R0Process = R0Process;
+ pMemSolaris->pvHandle = NULL;
+ pMemSolaris->fAccess = fPageAccess;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+ NOREF(fAccess);
+
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
+ if (!pMemSolaris)
+ return VERR_NO_MEMORY;
+
+ /* Lock down kernel pages. */
+ int fPageAccess = S_READ;
+ if (fAccess & RTMEM_PROT_WRITE)
+ fPageAccess = S_WRITE;
+ if (fAccess & RTMEM_PROT_EXEC)
+ fPageAccess = S_EXEC;
+ int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return rc;
+ }
+
+ /* Fill in the object attributes and return successfully. */
+ pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+ pMemSolaris->pvHandle = NULL;
+ pMemSolaris->fAccess = fPageAccess;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+ PRTR0MEMOBJSOL pMemSolaris;
+
+ /*
+ * Reserve kernel virtual address space from the heap arena using vmem_xalloc.
+ */
+ void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
+ NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
+ if (RT_UNLIKELY(!pv))
+ return VERR_NO_MEMORY;
+
+ /* Create the object. */
+ pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
+ if (!pMemSolaris)
+ {
+ LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
+ vmem_xfree(heap_arena, pv, cb);
+ return VERR_NO_MEMORY;
+ }
+
+ pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
+ RTR0PROCESS R0Process)
+{
+ return VERR_NOT_SUPPORTED;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+ unsigned fProt, size_t offSub, size_t cbSub)
+{
+ /* Fail if requested to do something we can't. */
+ AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+ if (uAlignment > PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Use xalloc to get address space.
+ */
+ if (!cbSub)
+ cbSub = pMemToMap->cb;
+ void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
+ NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
+ if (RT_UNLIKELY(!pv))
+ return VERR_MAP_FAILED;
+
+ /*
+ * Load the pages from the other object into it.
+ */
+ uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
+ if (fProt & RTMEM_PROT_READ)
+ fAttr |= PROT_READ;
+ if (fProt & RTMEM_PROT_EXEC)
+ fAttr |= PROT_EXEC;
+ if (fProt & RTMEM_PROT_WRITE)
+ fAttr |= PROT_WRITE;
+ fAttr |= HAT_NOSYNC;
+
+ int rc = VINF_SUCCESS;
+ size_t off = 0;
+ while (off < cbSub)
+ {
+ RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
+ AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
+ pfn_t pfn = HCPhys >> PAGE_SHIFT;
+ AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);
+
+ hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);
+
+ /* Advance. */
+ off += PAGE_SIZE;
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Create a memory object for the mapping.
+ */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
+ if (pMemSolaris)
+ {
+ pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
+ rc = VERR_NO_MEMORY;
+ }
+
+ if (off)
+ hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
+ vmem_xfree(heap_arena, pv, cbSub);
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
+ size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
+{
+ /*
+ * Fend off things we cannot do.
+ */
+ AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
+ AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+ if (uAlignment != PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Get parameters from the source object and offSub/cbSub.
+ */
+ PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
+ uint8_t *pb = pMemToMapSolaris->Core.pv ? (uint8_t *)pMemToMapSolaris->Core.pv + offSub : NULL;
+ size_t const cb = cbSub ? cbSub : pMemToMapSolaris->Core.cb;
+ size_t const cPages = cb >> PAGE_SHIFT;
+ Assert(!offSub || cbSub);
+ Assert(!(cb & PAGE_OFFSET_MASK));
+
+ /*
+ * Create the mapping object
+ */
+ PRTR0MEMOBJSOL pMemSolaris;
+ pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pb, cb);
+ if (RT_UNLIKELY(!pMemSolaris))
+ return VERR_NO_MEMORY;
+
+ /*
+ * Gather the physical page address of the pages to be mapped.
+ */
+ int rc = VINF_SUCCESS;
+ uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
+ if (RT_LIKELY(paPhysAddrs))
+ {
+ if ( pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
+ && pMemToMapSolaris->fIndivPages)
+ {
+ /* Translate individual page_t to physical addresses. */
+ page_t **papPages = pMemToMapSolaris->pvHandle;
+ AssertPtr(papPages);
+ papPages += offSub >> PAGE_SHIFT;
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(papPages[iPage]);
+ }
+ else if ( pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
+ && pMemToMapSolaris->fLargePage)
+ {
+ /* Split up the large page into page-sized chunks. */
+ RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
+ Phys += offSub;
+ for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
+ paPhysAddrs[iPage] = Phys;
+ }
+ else
+ {
+ /* Have kernel mapping, just translate virtual to physical. */
+ AssertPtr(pb);
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pb);
+ if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
+ {
+ LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
+ rc = VERR_MAP_FAILED;
+ break;
+ }
+ pb += PAGE_SIZE;
+ }
+ }
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Perform the actual mapping.
+ */
+ unsigned fPageAccess = PROT_READ;
+ if (fProt & RTMEM_PROT_WRITE)
+ fPageAccess |= PROT_WRITE;
+ if (fProt & RTMEM_PROT_EXEC)
+ fPageAccess |= PROT_EXEC;
+
+ caddr_t UserAddr = NULL;
+ rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
+ if (RT_SUCCESS(rc))
+ {
+ pMemSolaris->Core.u.Mapping.R0Process = R0Process;
+ pMemSolaris->Core.pv = UserAddr;
+
+ *ppMem = &pMemSolaris->Core;
+ kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
+ }
+
+ rc = VERR_MAP_FAILED;
+ kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+ NOREF(pMem);
+ NOREF(offSub);
+ NOREF(cbSub);
+ NOREF(fProt);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
+
+ switch (pMemSolaris->Core.enmType)
+ {
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ if ( pMemSolaris->Core.u.Phys.fAllocated
+ || !pMemSolaris->fIndivPages)
+ {
+ uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
+ return rtR0MemObjSolVirtToPhys(pb);
+ }
+ page_t **ppPages = pMemSolaris->pvHandle;
+ return rtR0MemObjSolPagePhys(ppPages[iPage]);
+
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_LOCK:
+ {
+ uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
+ return rtR0MemObjSolVirtToPhys(pb);
+ }
+
+ /*
+ * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
+ * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
+ */
+ case RTR0MEMOBJTYPE_MAPPING:
+ return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);
+
+ case RTR0MEMOBJTYPE_CONT:
+ case RTR0MEMOBJTYPE_PHYS:
+ AssertFailed(); /* handled by the caller */
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ default:
+ return NIL_RTHCPHYS;
+ }
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h
new file mode 100644
index 00000000..5e8116cf
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h
@@ -0,0 +1,322 @@
+/* $Id: memobj-r0drv-solaris.h $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects - Segment driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2012-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_solaris_memobj_r0drv_solaris_h
+#define IPRT_INCLUDED_SRC_r0drv_solaris_memobj_r0drv_solaris_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#include "the-solaris-kernel.h"
+
+
+/*******************************************************************************
+* Structures and Typedefs *
+*******************************************************************************/
+typedef struct SEGVBOX_CRARGS
+{
+ uint64_t *paPhysAddrs;
+ size_t cbPageSize;
+ uint_t fPageAccess;
+} SEGVBOX_CRARGS;
+typedef SEGVBOX_CRARGS *PSEGVBOX_CRARGS;
+
+typedef struct SEGVBOX_DATA
+{
+ uint_t fPageAccess;
+ size_t cbPageSize;
+} SEGVBOX_DATA;
+typedef SEGVBOX_DATA *PSEGVBOX_DATA;
+
+static struct seg_ops s_SegVBoxOps;
+static vnode_t s_segVBoxVnode;
+
+
+DECLINLINE(int) rtR0SegVBoxSolCreate(seg_t *pSeg, void *pvArgs)
+{
+ struct as *pAddrSpace = pSeg->s_as;
+ PSEGVBOX_CRARGS pArgs = pvArgs;
+ PSEGVBOX_DATA pData = kmem_zalloc(sizeof(*pData), KM_SLEEP);
+
+ AssertPtr(pAddrSpace);
+ AssertPtr(pArgs);
+ AssertPtr(pData);
+
+ /*
+ * Currently we only map _4K pages but this segment driver can handle any size
+ * supported by the Solaris HAT layer.
+ */
+ size_t cbPageSize = pArgs->cbPageSize;
+ size_t uPageShift = 0;
+ switch (cbPageSize)
+ {
+ case _4K: uPageShift = 12; break;
+ case _2M: uPageShift = 21; break;
+ default: AssertReleaseMsgFailed(("Unsupported page size for mapping cbPageSize=%llx\n", cbPageSize)); break;
+ }
+
+ hat_map(pAddrSpace->a_hat, pSeg->s_base, pSeg->s_size, HAT_MAP);
+ pData->fPageAccess = pArgs->fPageAccess | PROT_USER;
+ pData->cbPageSize = cbPageSize;
+
+ pSeg->s_ops = &s_SegVBoxOps;
+ pSeg->s_data = pData;
+
+ /*
+ * Now load and lock down the mappings to the physical addresses.
+ */
+ caddr_t virtAddr = pSeg->s_base;
+ pgcnt_t cPages = (pSeg->s_size + cbPageSize - 1) >> uPageShift;
+ for (pgcnt_t iPage = 0; iPage < cPages; ++iPage, virtAddr += cbPageSize)
+ {
+ hat_devload(pAddrSpace->a_hat, virtAddr, cbPageSize, pArgs->paPhysAddrs[iPage] >> uPageShift,
+ pData->fPageAccess | HAT_UNORDERED_OK, HAT_LOAD_LOCK);
+ }
+
+ return 0;
+}
+
+
+static int rtR0SegVBoxSolDup(seg_t *pSrcSeg, seg_t *pDstSeg)
+{
+ /*
+ * Duplicate a segment and return the new segment in 'pDstSeg'.
+ */
+ PSEGVBOX_DATA pSrcData = pSrcSeg->s_data;
+ PSEGVBOX_DATA pDstData = kmem_zalloc(sizeof(*pDstData), KM_SLEEP);
+
+ AssertPtr(pDstData);
+ AssertPtr(pSrcData);
+
+ pDstData->fPageAccess = pSrcData->fPageAccess;
+ pDstData->cbPageSize = pSrcData->cbPageSize;
+ pDstSeg->s_ops = &s_SegVBoxOps;
+ pDstSeg->s_data = pDstData;
+
+ return 0;
+}
+
+
+static int rtR0SegVBoxSolUnmap(seg_t *pSeg, caddr_t virtAddr, size_t cb)
+{
+ PSEGVBOX_DATA pData = pSeg->s_data;
+
+ AssertRelease(pData);
+ AssertReleaseMsg(virtAddr >= pSeg->s_base, ("virtAddr=%p s_base=%p\n", virtAddr, pSeg->s_base));
+ AssertReleaseMsg(virtAddr + cb <= pSeg->s_base + pSeg->s_size, ("virtAddr=%p cb=%llu s_base=%p s_size=%llu\n", virtAddr,
+ cb, pSeg->s_base, pSeg->s_size));
+ size_t cbPageOffset = pData->cbPageSize - 1;
+ AssertRelease(!(cb & cbPageOffset));
+ AssertRelease(!((uintptr_t)virtAddr & cbPageOffset));
+
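+ /* Partial unmapping is not supported; the entire segment must be unmapped at once. */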
+ if ( virtAddr != pSeg->s_base
+ || cb != pSeg->s_size)
+ {
+ return ENOTSUP;
+ }
+
+ hat_unload(pSeg->s_as->a_hat, virtAddr, cb, HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);
+
+ seg_free(pSeg);
+ return 0;
+}
+
+
+static void rtR0SegVBoxSolFree(seg_t *pSeg)
+{
+ PSEGVBOX_DATA pData = pSeg->s_data;
+ kmem_free(pData, sizeof(*pData));
+}
+
+
+static int rtR0SegVBoxSolFault(struct hat *pHat, seg_t *pSeg, caddr_t virtAddr, size_t cb, enum fault_type FaultType,
+ enum seg_rw ReadWrite)
+{
+ /*
+ * We would demand fault if the (u)read() path would SEGOP_FAULT() on buffers mapped in via our
+ * segment driver i.e. prefaults before DMA. Don't fail in such case where we're called directly,
+ * see @bugref{5047}.
+ */
+ return 0;
+}
+
+
+static int rtR0SegVBoxSolFaultA(seg_t *pSeg, caddr_t virtAddr)
+{
+ return 0;
+}
+
+
+static int rtR0SegVBoxSolSetProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t fPageAccess)
+{
+ return EACCES;
+}
+
+
+static int rtR0SegVBoxSolCheckProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t fPageAccess)
+{
+ return EINVAL;
+}
+
+
+static int rtR0SegVBoxSolKluster(seg_t *pSeg, caddr_t virtAddr, ssize_t Delta)
+{
+ return -1;
+}
+
+
+static int rtR0SegVBoxSolSync(seg_t *pSeg, caddr_t virtAddr, size_t cb, int Attr, uint_t fFlags)
+{
+ return 0;
+}
+
+
+static size_t rtR0SegVBoxSolInCore(seg_t *pSeg, caddr_t virtAddr, size_t cb, char *pVec)
+{
+ PSEGVBOX_DATA pData = pSeg->s_data;
+ AssertRelease(pData);
+ size_t uPageOffset = pData->cbPageSize - 1;
+ size_t uPageMask = ~uPageOffset;
+ size_t cbLen = (cb + uPageOffset) & uPageMask;
+ for (virtAddr = 0; cbLen != 0; cbLen -= pData->cbPageSize, virtAddr += pData->cbPageSize)
+ *pVec++ = 1;
+ return cbLen;
+}
+
+
+static int rtR0SegVBoxSolLockOp(seg_t *pSeg, caddr_t virtAddr, size_t cb, int Attr, int Op, ulong_t *pLockMap, size_t off)
+{
+ return 0;
+}
+
+
+static int rtR0SegVBoxSolGetProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t *pafPageAccess)
+{
+ PSEGVBOX_DATA pData = pSeg->s_data;
+ size_t iPage = seg_page(pSeg, virtAddr + cb) - seg_page(pSeg, virtAddr) + 1;
+ if (iPage)
+ {
+ do
+ {
+ iPage--;
+ pafPageAccess[iPage] = pData->fPageAccess;
+ } while (iPage);
+ }
+ return 0;
+}
+
+
+static u_offset_t rtR0SegVBoxSolGetOffset(seg_t *pSeg, caddr_t virtAddr)
+{
+ return ((uintptr_t)virtAddr - (uintptr_t)pSeg->s_base);
+}
+
+
+static int rtR0SegVBoxSolGetType(seg_t *pSeg, caddr_t virtAddr)
+{
+ return MAP_SHARED;
+}
+
+
+static int rtR0SegVBoxSolGetVp(seg_t *pSeg, caddr_t virtAddr, vnode_t **ppVnode)
+{
+ *ppVnode = &s_segVBoxVnode;
+ return 0;
+}
+
+
+static int rtR0SegVBoxSolAdvise(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t Behav /* madvise() behavior hint, ignored */)
+{
+ return 0;
+}
+
+
+#if defined(VBOX_NEW_CRASH_DUMP_FORMAT)
+static void rtR0SegVBoxSolDump(seg_t *pSeg, dump_addpage_f Func)
+#else
+static void rtR0SegVBoxSolDump(seg_t *pSeg)
+#endif
+{
+ /* Nothing to do. */
+}
+
+
+static int rtR0SegVBoxSolPageLock(seg_t *pSeg, caddr_t virtAddr, size_t cb, page_t ***pppPage, enum lock_type LockType, enum seg_rw ReadWrite)
+{
+ return ENOTSUP;
+}
+
+
+static int rtR0SegVBoxSolSetPageSize(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t SizeCode)
+{
+ return ENOTSUP;
+}
+
+
+static int rtR0SegVBoxSolGetMemId(seg_t *pSeg, caddr_t virtAddr, memid_t *pMemId)
+{
+ return ENODEV;
+}
+
+
+static int rtR0SegVBoxSolCapable(seg_t *pSeg, segcapability_t Capab)
+{
+ return 0;
+}
+
+
+static struct seg_ops s_SegVBoxOps =
+{
+ rtR0SegVBoxSolDup,
+ rtR0SegVBoxSolUnmap,
+ rtR0SegVBoxSolFree,
+ rtR0SegVBoxSolFault,
+ rtR0SegVBoxSolFaultA,
+ rtR0SegVBoxSolSetProt,
+ rtR0SegVBoxSolCheckProt,
+ rtR0SegVBoxSolKluster,
+ NULL, /* swapout */
+ rtR0SegVBoxSolSync,
+ rtR0SegVBoxSolInCore,
+ rtR0SegVBoxSolLockOp,
+ rtR0SegVBoxSolGetProt,
+ rtR0SegVBoxSolGetOffset,
+ rtR0SegVBoxSolGetType,
+ rtR0SegVBoxSolGetVp,
+ rtR0SegVBoxSolAdvise,
+ rtR0SegVBoxSolDump,
+ rtR0SegVBoxSolPageLock,
+ rtR0SegVBoxSolSetPageSize,
+ rtR0SegVBoxSolGetMemId,
+ NULL, /* getpolicy() */
+ rtR0SegVBoxSolCapable
+};
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_solaris_memobj_r0drv_solaris_h */
+
diff --git a/src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c
new file mode 100644
index 00000000..9d6ff068
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c
@@ -0,0 +1,100 @@
+/* $Id: memuserkernel-r0drv-solaris.c $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2009-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+
+
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+ int rc;
+ RT_ASSERT_INTS_ON();
+
+ rc = ddi_copyin((const char *)R3PtrSrc, pvDst, cb, 0 /*flags*/);
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+ int rc;
+ RT_ASSERT_INTS_ON();
+
+ rc = ddi_copyout(pvSrc, (void *)R3PtrDst, cb, 0 /*flags*/);
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
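+ /* On Solaris the user address space lies below 'kernelbase'. */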
+ return R3Ptr < kernelbase;
+}
+
+
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+ return (uintptr_t)pv >= kernelbase;
+}
+
+
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+ return true;
+}
+
+
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+ int rc = kcopy(pvSrc, pvDst, cb);
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+ int rc = kcopy(pvSrc, pvDst, cb);
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c
new file mode 100644
index 00000000..5b5792c8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c
@@ -0,0 +1,79 @@
+/* $Id: modulestub-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Ring-0 Solaris stubs
+ */
+
+/*
+ * Copyright (C) 2011-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <sys/modctl.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static struct modlmisc g_rtModuleStubMisc =
+{
+ &mod_miscops, /* extern from kernel */
+ "platform agnostic module"
+};
+
+
+static struct modlinkage g_rtModuleStubModLinkage =
+{
+ MODREV_1, /* loadable module system revision */
+ {
+ &g_rtModuleStubMisc,
+ NULL /* terminate array of linkage structures */
+ }
+};
+
+
+
+int _init(void);
+int _init(void)
+{
+ /* Disable auto unloading. */
+ modctl_t *pModCtl = mod_getctl(&g_rtModuleStubModLinkage);
+ if (pModCtl)
+ pModCtl->mod_loadflags |= MOD_NOAUTOUNLOAD;
+
+ return mod_install(&g_rtModuleStubModLinkage);
+}
+
+
+int _fini(void);
+int _fini(void)
+{
+ return mod_remove(&g_rtModuleStubModLinkage);
+}
+
+
+int _info(struct modinfo *pModInfo);
+int _info(struct modinfo *pModInfo)
+{
+ return mod_info(&g_rtModuleStubModLinkage, pModInfo);
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c
new file mode 100644
index 00000000..f0485ec7
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c
@@ -0,0 +1,450 @@
+/* $Id: mp-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2008-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/thread.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include "r0drv/mp-r0drv.h"
+
+typedef int FNRTMPSOLWORKER(void *pvUser1, void *pvUser2, void *pvUser3);
+typedef FNRTMPSOLWORKER *PFNRTMPSOLWORKER;
+
+
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+ return false;
+}
+
+
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+ return CPU->cpu_id;
+}
+
+
+RTDECL(int) RTMpCurSetIndex(void)
+{
+ return CPU->cpu_id;
+}
+
+
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+ return *pidCpu = CPU->cpu_id;
+}
+
+
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+ return idCpu < RTCPUSET_MAX_CPUS && idCpu <= max_cpuid ? idCpu : -1;
+}
+
+
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+ return (unsigned)iCpu <= max_cpuid ? iCpu : NIL_RTCPUID;
+}
+
+
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+ return max_cpuid;
+}
+
+
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+ /*
+ * We cannot query the CPU status recursively, so check membership in the cached CPU set instead.
+ */
+ if (idCpu >= ncpus)
+ return false;
+
+ return RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu);
+}
+
+
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+ return idCpu < ncpus;
+}
+
+
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+ RTCPUID idCpu;
+
+ RTCpuSetEmpty(pSet);
+ idCpu = RTMpGetMaxCpuId(); /* it's inclusive */
+ do
+ {
+ if (RTMpIsCpuPossible(idCpu))
+ RTCpuSetAdd(pSet, idCpu);
+ } while (idCpu-- > 0);
+
+ return pSet;
+}
+
+
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+ return ncpus;
+}
+
+
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+ /*
+ * We cannot query the CPU status recursively, so return the cached set.
+ */
+ *pSet = g_rtMpSolCpuSet;
+ return pSet;
+}
+
+
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+ RTCPUSET Set;
+ RTMpGetOnlineSet(&Set);
+ return RTCpuSetCount(&Set);
+}
+
+
+/**
+ * Wrapper to Solaris IPI infrastructure.
+ *
+ * @param pCpuSet Pointer to Solaris CPU set.
+ * @param pfnSolWorker Function to execute on target CPU(s).
+ * @param pArgs Pointer to RTMPARGS to pass to @a pfnSolWorker.
+ */
+static void rtMpSolCrossCall(PRTSOLCPUSET pCpuSet, PFNRTMPSOLWORKER pfnSolWorker, PRTMPARGS pArgs)
+{
+ AssertPtrReturnVoid(pCpuSet);
+ AssertPtrReturnVoid(pfnSolWorker);
+ AssertPtrReturnVoid(pArgs);
+
+ if (g_frtSolOldIPI)
+ {
+ if (g_frtSolOldIPIUlong)
+ {
+ g_rtSolXcCall.u.pfnSol_xc_call_old_ulong((xc_arg_t)pArgs, /* Arg to IPI function */
+ 0, /* Arg2, ignored */
+ 0, /* Arg3, ignored */
+ IPRT_SOL_X_CALL_HIPRI, /* IPI priority */
+ pCpuSet->auCpus[0], /* Target CPU(s) */
+ (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+ }
+ else
+ {
+ g_rtSolXcCall.u.pfnSol_xc_call_old((xc_arg_t)pArgs, /* Arg to IPI function */
+ 0, /* Arg2, ignored */
+ 0, /* Arg3, ignored */
+ IPRT_SOL_X_CALL_HIPRI, /* IPI priority */
+ *pCpuSet, /* Target CPU set */
+ (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+ }
+ }
+ else
+ {
+ g_rtSolXcCall.u.pfnSol_xc_call((xc_arg_t)pArgs, /* Arg to IPI function */
+ 0, /* Arg2 */
+ 0, /* Arg3 */
+ &pCpuSet->auCpus[0], /* Target CPU set */
+ (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+ }
+}
+
+
+/**
+ * Wrapper between the native Solaris per-cpu callback and PFNRTMPWORKER
+ * for the RTMpOnAll API.
+ *
+ * @returns Solaris error code.
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ */
+static int rtMpSolOnAllCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+
+ /*
+ * Solaris CPU cross calls execute on offline CPUs too. Check our cached CPU
+ * set and ignore the call if this CPU is offline.
+ */
+ if (!RTMpIsCpuOnline(RTMpCpuId()))
+ return 0;
+
+ pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RTMPARGS Args;
+ RTSOLCPUSET CpuSet;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RT_ASSERT_INTS_ON();
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = NIL_RTCPUID;
+ Args.cHits = 0;
+
+ for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+ CpuSet.auCpus[i] = (ulong_t)-1L;
+
+ RTThreadPreemptDisable(&PreemptState);
+
+ rtMpSolCrossCall(&CpuSet, rtMpSolOnAllCpuWrapper, &Args);
+
+ RTThreadPreemptRestore(&PreemptState);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native Solaris per-cpu callback and PFNRTMPWORKER
+ * for the RTMpOnOthers API.
+ *
+ * @returns Solaris error code.
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ */
+static int rtMpSolOnOtherCpusWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+ RTCPUID idCpu = RTMpCpuId();
+
+ Assert(idCpu != pArgs->idCpu);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RTMPARGS Args;
+ RTSOLCPUSET CpuSet;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RT_ASSERT_INTS_ON();
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = RTMpCpuId();
+ Args.cHits = 0;
+
+ /* The caller is supposed to have disabled preemption, but take no chances. */
+ RTThreadPreemptDisable(&PreemptState);
+
+ for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+ CpuSet.auCpus[i] = (ulong_t)-1L;
+ BT_CLEAR(CpuSet.auCpus, RTMpCpuId());
+
+ rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);
+
+ RTThreadPreemptRestore(&PreemptState);
+
+ return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Wrapper between the native Solaris per-cpu callback and PFNRTMPWORKER
+ * for the RTMpOnPair API.
+ *
+ * @returns Solaris error code.
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ */
+static int rtMpSolOnPairCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+ RTCPUID idCpu = RTMpCpuId();
+
+ Assert(idCpu == pArgs->idCpu || idCpu == pArgs->idCpu2);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ int rc;
+ RTMPARGS Args;
+ RTSOLCPUSET CpuSet;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+
+ AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu1;
+ Args.idCpu2 = idCpu2;
+ Args.cHits = 0;
+
+ for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+ CpuSet.auCpus[i] = 0;
+ BT_SET(CpuSet.auCpus, idCpu1);
+ BT_SET(CpuSet.auCpus, idCpu2);
+
+ /*
+ * Check that both CPUs are online before doing the broadcast call.
+ */
+ RTThreadPreemptDisable(&PreemptState);
+ if ( RTMpIsCpuOnline(idCpu1)
+ && RTMpIsCpuOnline(idCpu2))
+ {
+ rtMpSolCrossCall(&CpuSet, rtMpSolOnPairCpuWrapper, &Args);
+
+ Assert(Args.cHits <= 2);
+ if (Args.cHits == 2)
+ rc = VINF_SUCCESS;
+ else if (Args.cHits == 1)
+ rc = VERR_NOT_ALL_CPUS_SHOWED;
+ else if (Args.cHits == 0)
+ rc = VERR_CPU_OFFLINE;
+ else
+ rc = VERR_CPU_IPE_1;
+ }
+ /*
+ * A CPU must be present to be considered just offline.
+ */
+ else if ( RTMpIsCpuPresent(idCpu1)
+ && RTMpIsCpuPresent(idCpu2))
+ rc = VERR_CPU_OFFLINE;
+ else
+ rc = VERR_CPU_NOT_FOUND;
+
+ RTThreadPreemptRestore(&PreemptState);
+ return rc;
+}
+
+
+RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
+{
+ return true;
+}
+
+
+/**
+ * Wrapper between the native Solaris per-cpu callback and PFNRTMPWORKER
+ * for the RTMpOnSpecific API.
+ *
+ * @returns Solaris error code.
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ */
+static int rtMpSolOnSpecificCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+ RTCPUID idCpu = RTMpCpuId();
+
+ Assert(idCpu == pArgs->idCpu);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RTMPARGS Args;
+ RTSOLCPUSET CpuSet;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RT_ASSERT_INTS_ON();
+
+ if (idCpu >= ncpus)
+ return VERR_CPU_NOT_FOUND;
+
+ if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
+ return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu;
+ Args.cHits = 0;
+
+ for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+ CpuSet.auCpus[i] = 0;
+ BT_SET(CpuSet.auCpus, idCpu);
+
+ RTThreadPreemptDisable(&PreemptState);
+
+ rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
+
+ RTThreadPreemptRestore(&PreemptState);
+
+ Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
+
+ return ASMAtomicUoReadU32(&Args.cHits) == 1
+ ? VINF_SUCCESS
+ : VERR_CPU_NOT_FOUND;
+}
+
+
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+ return true;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c
new file mode 100644
index 00000000..1ba45d86
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c
@@ -0,0 +1,139 @@
+/* $Id: mpnotification-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Multiprocessor Event Notifications, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2008-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include "r0drv/mp-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Whether CPUs are being watched or not. */
+static volatile bool g_fSolCpuWatch = false;
+/** Set of online CPUs that is maintained by the MP callback.
+ * This avoids locking issues when querying the set from the kernel as well
+ * as eliminating any uncertainty regarding the online status during the
+ * callback. */
+RTCPUSET g_rtMpSolCpuSet;
+
+/**
+ * Internal solaris representation for watching CPUs.
+ */
+typedef struct RTMPSOLWATCHCPUS
+{
+ /** Function pointer to Mp worker. */
+ PFNRTMPWORKER pfnWorker;
+ /** Argument to pass to the Mp worker. */
+ void *pvArg;
+} RTMPSOLWATCHCPUS;
+typedef RTMPSOLWATCHCPUS *PRTMPSOLWATCHCPUS;
+
+
+/**
+ * Solaris callback function for Mp event notification.
+ *
+ * @returns Solaris error code.
+ * @param CpuState The current event/state of the CPU.
+ * @param iCpu The CPU this event is for.
+ * @param pvArg Ignored.
+ *
+ * @remarks This function assumes index == RTCPUID.
+ * We may -not- be executing on the CPU that is going online/offline
+ * and may be called with preemption enabled.
+ */
+static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
+{
+ RTMPEVENT enmMpEvent;
+
+ /*
+ * Update our CPU set structures first regardless of whether we've been
+ * scheduled on the right CPU or not; this is just atomic accounting.
+ */
+ if (CpuState == CPU_ON)
+ {
+ enmMpEvent = RTMPEVENT_ONLINE;
+ RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
+ }
+ else if (CpuState == CPU_OFF)
+ {
+ enmMpEvent = RTMPEVENT_OFFLINE;
+ RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
+ }
+ else
+ return 0;
+
+ rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
+ NOREF(pvArg);
+ return 0;
+}
+
+
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+ if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
+ return VERR_WRONG_ORDER;
+
+ /*
+ * Register the callback building the online cpu set as we do so.
+ */
+ RTCpuSetEmpty(&g_rtMpSolCpuSet);
+
+ mutex_enter(&cpu_lock);
+ register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
+
+ for (int i = 0; i < (int)RTMpGetCount(); ++i)
+ if (cpu_is_online(cpu[i]))
+ rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);
+
+ ASMAtomicWriteBool(&g_fSolCpuWatch, true);
+ mutex_exit(&cpu_lock);
+
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+ if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
+ {
+ mutex_enter(&cpu_lock);
+ unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
+ ASMAtomicWriteBool(&g_fSolCpuWatch, false);
+ mutex_exit(&cpu_lock);
+ }
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c
new file mode 100644
index 00000000..ecf66f78
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c
@@ -0,0 +1,49 @@
+/* $Id: process-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Process Management, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/process.h>
+
+
+
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+ return ddi_get_pid();
+}
+
+
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+ proc_t *pProcess = NULL;
+ drv_getparm(UPROCP, &pProcess);
+ return (RTR0PROCESS)pProcess;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c
new file mode 100644
index 00000000..47ef7df1
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c
@@ -0,0 +1,347 @@
+/* $Id: semevent-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include <iprt/list.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+#include "semeventwait-r0drv-solaris.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Waiter entry. Lives on the stack.
+ *
+ * @remarks Unfortunately, we cannot easily use cv_signal because we cannot
+ * distinguish between it and the spurious wakeups we get after fork.
+ * So, we keep an unprioritized FIFO with the sleeping threads.
+ */
+typedef struct RTSEMEVENTSOLENTRY
+{
+ /** The list node. */
+ RTLISTNODE Node;
+ /** The thread. */
+ kthread_t *pThread;
+ /** Set to @c true when waking up the thread by signal or destroy. */
+ uint32_t volatile fWokenUp;
+} RTSEMEVENTSOLENTRY;
+/** Pointer to waiter entry. */
+typedef RTSEMEVENTSOLENTRY *PRTSEMEVENTSOLENTRY;
+
+
+/**
+ * Solaris event semaphore.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The number of threads referencing this object. */
+ uint32_t volatile cRefs;
+ /** Set if the object is signalled when there are no waiters. */
+ bool fSignaled;
+ /** List of waiting and woken up threads. */
+ RTLISTANCHOR WaitList;
+ /** The Solaris mutex protecting this structure and pairing up with the cv. */
+ kmutex_t Mtx;
+ /** The Solaris condition variable. */
+ kcondvar_t Cnd;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+ return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+ AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+ RT_ASSERT_PREEMPTIBLE();
+
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->cRefs = 1;
+ pThis->fSignaled = false;
+ RTListInit(&pThis->WaitList);
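+ /* The mutex gets an interrupt block cookie at DISP_LEVEL, presumably so it can be taken from contexts running at an elevated PIL. */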
+ mutex_init(&pThis->Mtx, "IPRT Event Semaphore", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
+ cv_init(&pThis->Cnd, "IPRT CV", CV_DRIVER, NULL);
+
+ *phEventSem = pThis;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventSolRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000);
+ NOREF(cRefs);
+}
+
+
+/**
+ * The destructor.
+ *
+ * @param pThis The semaphore.
+ */
+static void rtR0SemEventSolDtor(PRTSEMEVENTINTERNAL pThis)
+{
+ Assert(pThis->u32Magic != RTSEMEVENT_MAGIC);
+ cv_destroy(&pThis->Cnd);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventSolRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ rtR0SemEventSolDtor(pThis);
+}
+
+
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = hEventSem;
+ if (pThis == NIL_RTSEMEVENT)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs > 0);
+ RT_ASSERT_INTS_ON();
+
+ mutex_enter(&pThis->Mtx);
+
+ /*
+ * Invalidate the semaphore.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
+ ASMAtomicWriteBool(&pThis->fSignaled, false);
+
+ /*
+ * Abort and wake up all threads.
+ */
+ PRTSEMEVENTSOLENTRY pWaiter;
+ RTListForEach(&pThis->WaitList, pWaiter, RTSEMEVENTSOLENTRY, Node)
+ {
+ pWaiter->fWokenUp = true;
+ }
+ cv_broadcast(&pThis->Cnd);
+
+ /*
+ * Release the reference from RTSemEventCreateEx.
+ */
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventSolRelease(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+
+ rtR0SemEventSolRetain(pThis);
+ rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
+
+ /*
+ * Wake up one thread.
+ */
+ ASMAtomicWriteBool(&pThis->fSignaled, true);
+
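+ /* Hand the signal directly to the first blocked waiter, if any, and clear the signaled state again; otherwise the semaphore stays signaled for the next waiter. */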
+ PRTSEMEVENTSOLENTRY pWaiter;
+ RTListForEach(&pThis->WaitList, pWaiter, RTSEMEVENTSOLENTRY, Node)
+ {
+ if (!pWaiter->fWokenUp)
+ {
+ pWaiter->fWokenUp = true;
+ setrun(pWaiter->pThread);
+ ASMAtomicWriteBool(&pThis->fSignaled, false);
+ break;
+ }
+ }
+
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventSolRelease(pThis);
+
+#ifdef DEBUG_ramshankar
+ /** See @bugref{6318#c11}. */
+ return VINF_SUCCESS;
+#endif
+ RT_ASSERT_PREEMPT_CPUID();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventSolWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+
+ rtR0SemEventSolRetain(pThis);
+ mutex_enter(&pThis->Mtx);
+
+ /*
+ * In the signaled state?
+ */
+ int rc;
+ if (ASMAtomicCmpXchgBool(&pThis->fSignaled, false, true))
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMSOLWAIT Wait;
+ rc = rtR0SemSolWaitInit(&Wait, fFlags, uTimeout);
+ if (RT_SUCCESS(rc))
+ {
+ RTSEMEVENTSOLENTRY Waiter; /* ASSUMES we won't get swapped out while waiting (TS_DONT_SWAP). */
+ Waiter.pThread = curthread;
+ Waiter.fWokenUp = false;
+ RTListAppend(&pThis->WaitList, &Waiter.Node);
+
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ /* Check the exit conditions. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (Waiter.fWokenUp)
+ rc = VINF_SUCCESS;
+ else if (rtR0SemSolWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemSolWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemSolWaitDoIt(&Wait, &pThis->Cnd, &pThis->Mtx, &Waiter.fWokenUp, false);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemSolWaitDelete(&Wait);
+ RTListNodeRemove(&Waiter.Node);
+ }
+ }
+
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventSolRelease(pThis);
+ return rc;
+}
+
+
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventSolWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventSolWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+
+
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventSolWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+ return rtR0SemSolWaitGetResolution();
+}
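+
+/*
+ * Illustrative usage sketch: a minimal consumer/producer pattern for the
+ * auto-reset event implemented above, assuming standard IPRT semantics.
+ * doWork() is a hypothetical placeholder.
+ *
+ *     RTSEMEVENT hEvt;
+ *     int rc = RTSemEventCreate(&hEvt);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         // Consumer: wait up to 100 ms (relative, interruptible) for a signal.
+ *         rc = RTSemEventWaitEx(hEvt, RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS
+ *                               | RTSEMWAIT_FLAGS_INTERRUPTIBLE, 100);
+ *         if (RT_SUCCESS(rc))
+ *             doWork();
+ *
+ *         // Producer (another thread): wake exactly one waiter.
+ *         RTSemEventSignal(hEvt);
+ *
+ *         RTSemEventDestroy(hEvt);
+ *     }
+ */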
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c
new file mode 100644
index 00000000..7b10b610
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c
@@ -0,0 +1,355 @@
+/* $Id: semeventmulti-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+#include "internal/magics.h"
+#include "semeventwait-r0drv-solaris.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name fStateAndGen values
+ * @{ */
+/** The state bit number. */
+#define RTSEMEVENTMULTISOL_STATE_BIT 0
+/** The state mask. */
+#define RTSEMEVENTMULTISOL_STATE_MASK RT_BIT_32(RTSEMEVENTMULTISOL_STATE_BIT)
+/** The generation mask. */
+#define RTSEMEVENTMULTISOL_GEN_MASK ~RTSEMEVENTMULTISOL_STATE_MASK
+/** The generation shift. */
+#define RTSEMEVENTMULTISOL_GEN_SHIFT 1
+/** The initial variable value. */
+#define RTSEMEVENTMULTISOL_STATE_GEN_INIT UINT32_C(0xfffffffc)
+/** @} */
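+
+/* Illustrative note on the encoding above, assuming the macros as defined:
+ * bit 0 holds the signalled state and bits 1..31 hold the generation counter.
+ * A signal bumps the generation and then sets the state bit, roughly:
+ *     fNew = fOld + (UINT32_C(1) << RTSEMEVENTMULTISOL_GEN_SHIFT); // new generation
+ *     fNew |= RTSEMEVENTMULTISOL_STATE_MASK;                       // mark signalled
+ * so a waiter can detect a wake-up either via the state bit or via a
+ * generation change, as done in the wait worker below. */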
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Solaris multiple release event semaphore.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+ /** Magic value (RTSEMEVENTMULTI_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The number of references. */
+ uint32_t volatile cRefs;
+ /** The object state bit and generation counter.
+ * The generation counter is incremented every time the object is
+ * signalled. */
+ uint32_t volatile fStateAndGen;
+ /** The Solaris mutex protecting this structure and pairing up with the cv. */
+ kmutex_t Mtx;
+ /** The Solaris condition variable. */
+ kcondvar_t Cnd;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+ return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+ const char *pszNameFmt, ...)
+{
+ AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
+ RT_ASSERT_PREEMPTIBLE();
+
+ AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMEVENTMULTI_MAGIC;
+ pThis->cRefs = 1;
+ pThis->fStateAndGen = RTSEMEVENTMULTISOL_STATE_GEN_INIT;
+ mutex_init(&pThis->Mtx, "IPRT Multiple Release Event Semaphore", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
+ cv_init(&pThis->Cnd, "IPRT CV", CV_DRIVER, NULL);
+
+ *phEventMultiSem = pThis;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiSolRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000);
+ NOREF(cRefs);
+}
+
+
+/**
+ * Destructor that is called when cRefs == 0.
+ *
+ * @param pThis The instance to destroy.
+ */
+static void rtSemEventMultiDtor(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC);
+ cv_destroy(&pThis->Cnd);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiSolRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ rtSemEventMultiDtor(pThis);
+}
+
+
+
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (pThis == NIL_RTSEMEVENTMULTI)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->cRefs > 0, ("pThis=%p cRefs=%d\n", pThis, pThis->cRefs), VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+
+ mutex_enter(&pThis->Mtx);
+
+ /* Invalidate the handle and wake up all threads that might be waiting on the semaphore. */
+ Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
+ ASMAtomicWriteU32(&pThis->u32Magic, RTSEMEVENTMULTI_MAGIC_DEAD);
+ ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTISOL_GEN_MASK);
+ cv_broadcast(&pThis->Cnd);
+
+ /* Drop the reference from RTSemEventMultiCreateEx. */
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventMultiSolRelease(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
+ ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
+ VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+ rtR0SemEventMultiSolRetain(pThis);
+ rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
+ Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
+
+ /*
+ * Do the job.
+ */
+ uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ fNew += 1 << RTSEMEVENTMULTISOL_GEN_SHIFT;
+ fNew |= RTSEMEVENTMULTISOL_STATE_MASK;
+ ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);
+
+ cv_broadcast(&pThis->Cnd);
+
+ mutex_exit(&pThis->Mtx);
+
+ rtR0SemEventMultiSolRelease(pThis);
+#ifdef DEBUG_ramshankar
+ /** See @bugref{6318#c11}. */
+ return VINF_SUCCESS;
+#endif
+ RT_ASSERT_PREEMPT_CPUID();
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
+ ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
+ VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+
+ rtR0SemEventMultiSolRetain(pThis);
+ rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
+ Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
+
+ /*
+ * Do the job (could be done without the lock, but play safe).
+ */
+ ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTISOL_STATE_MASK);
+
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventMultiSolRelease(pThis);
+
+#ifdef DEBUG_ramshankar
+ /** See @bugref{6318#c11}. */
+ return VINF_SUCCESS;
+#endif
+ RT_ASSERT_PREEMPT_CPUID();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventMultiWaitEx.
+ * @param uTimeout See RTSemEventMultiWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventMultiSolWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ uint32_t fOrgStateAndGen;
+ int rc;
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiSolRetain(pThis);
+ mutex_enter(&pThis->Mtx); /* this could be moved down to the else, but play safe for now. */
+
+ /*
+ * Is the event already signalled or do we have to wait?
+ */
+ fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ if (fOrgStateAndGen & RTSEMEVENTMULTISOL_STATE_MASK)
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMSOLWAIT Wait;
+ rc = rtR0SemSolWaitInit(&Wait, fFlags, uTimeout);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ /* Check the exit conditions. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
+ rc = VINF_SUCCESS;
+ else if (rtR0SemSolWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemSolWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemSolWaitDoIt(&Wait, &pThis->Cnd, &pThis->Mtx, &pThis->fStateAndGen, fOrgStateAndGen);
+ continue;
+ }
+ }
+ break;
+ }
+ rtR0SemSolWaitDelete(&Wait);
+ }
+ }
+
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventMultiSolRelease(pThis);
+ return rc;
+}
+
+
+
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventMultiSolWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventMultiSolWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+
+
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventMultiSolWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+ return rtR0SemSolWaitGetResolution();
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h b/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h
new file mode 100644
index 00000000..6ce0ad21
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h
@@ -0,0 +1,496 @@
+/* $Id: semeventwait-r0drv-solaris.h $ */
+/** @file
+ * IPRT - Solaris Ring-0 Driver Helpers for Event Semaphore Waits.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_solaris_semeventwait_r0drv_solaris_h
+#define IPRT_INCLUDED_SRC_r0drv_solaris_semeventwait_r0drv_solaris_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "the-solaris-kernel.h"
+
+#include <iprt/err.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+
+
+/** The resolution (nanoseconds) specified when using timeout_generic. */
+#define RTR0SEMSOLWAIT_RESOLUTION 50000
+
+/** Disables the cyclic fallback code for old S10 installs - see @bugref{5342}.
+ * @todo Fixed by @bugref{5595}, can be re-enabled after checking out
+ * CY_HIGH_LEVEL. */
+#define RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+
+#define SOL_THREAD_TINTR_PTR ((kthread_t **)((char *)curthread + g_offrtSolThreadIntrThread))
+
+
+/**
+ * Solaris semaphore wait structure.
+ */
+typedef struct RTR0SEMSOLWAIT
+{
+ /** The absolute timeout given as nanoseconds since the start of the
+ * monotonic clock. */
+ uint64_t uNsAbsTimeout;
+ /** The timeout in nanoseconds relative to the start of the wait. */
+ uint64_t cNsRelTimeout;
+ /** The native timeout value. */
+ union
+ {
+ /** The timeout (in ticks) when fHighRes is false. */
+ clock_t lTimeout;
+ } u;
+ /** Set if we use high resolution timeouts. */
+ bool fHighRes;
+ /** Set if it's an indefinite wait. */
+ bool fIndefinite;
+ /** Set if the waiting thread is ready to be woken up.
+ * Avoids false setrun() calls due to temporary mutex exits. */
+ bool volatile fWantWakeup;
+ /** Set if we've already timed out.
+ * Set by rtR0SemSolWaitDoIt or rtR0SemSolWaitHighResTimeout, read by
+ * rtR0SemSolWaitHasTimedOut. */
+ bool volatile fTimedOut;
+ /** Whether the wait was interrupted. */
+ bool fInterrupted;
+ /** Interruptible or uninterruptible wait. */
+ bool fInterruptible;
+ /** The thread to wake up. */
+ kthread_t *pThread;
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ /** Cyclic timer ID (used by the timeout callback). */
+ cyclic_id_t idCy;
+#endif
+ /** The mutex associated with the condition variable wait. */
+ void volatile *pvMtx;
+} RTR0SEMSOLWAIT;
+/** Pointer to a Solaris semaphore wait structure. */
+typedef RTR0SEMSOLWAIT *PRTR0SEMSOLWAIT;
+
+
+/**
+ * Initializes a wait.
+ *
+ * The caller MUST check the wait condition BEFORE calling this function or the
+ * timeout logic will be flawed.
+ *
+ * @returns VINF_SUCCESS or VERR_TIMEOUT.
+ * @param pWait The wait structure.
+ * @param fFlags The wait flags.
+ * @param uTimeout The timeout.
+ */
+DECLINLINE(int) rtR0SemSolWaitInit(PRTR0SEMSOLWAIT pWait, uint32_t fFlags, uint64_t uTimeout)
+{
+ /*
+ * Process the flags and timeout.
+ */
+ if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+ {
+ if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+ uTimeout = uTimeout < UINT64_MAX / RT_NS_1MS
+ ? uTimeout * RT_NS_1MS
+ : UINT64_MAX;
+ if (uTimeout == UINT64_MAX)
+ fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+ else
+ {
+ uint64_t u64Now;
+ if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+ {
+ if (uTimeout == 0)
+ return VERR_TIMEOUT;
+
+ u64Now = RTTimeSystemNanoTS();
+ pWait->cNsRelTimeout = uTimeout;
+ pWait->uNsAbsTimeout = u64Now + uTimeout;
+ if (pWait->uNsAbsTimeout < u64Now) /* overflow */
+ fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+ }
+ else
+ {
+ u64Now = RTTimeSystemNanoTS();
+ if (u64Now >= uTimeout)
+ return VERR_TIMEOUT;
+
+ pWait->cNsRelTimeout = uTimeout - u64Now;
+ pWait->uNsAbsTimeout = uTimeout;
+ }
+ }
+ }
+
+ if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+ {
+ pWait->fIndefinite = false;
+ if ( ( (fFlags & (RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_ABSOLUTE))
+ || pWait->cNsRelTimeout < UINT32_C(1000000000) / 100 /*Hz*/ * 4)
+#ifdef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ && g_pfnrtR0Sol_timeout_generic != NULL
+#endif
+ )
+ pWait->fHighRes = true;
+ else
+ {
+ uint64_t cTicks = NSEC_TO_TICK_ROUNDUP(uTimeout);
+ if (cTicks >= LONG_MAX)
+ fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+ else
+ {
+ pWait->u.lTimeout = cTicks;
+ pWait->fHighRes = false;
+ }
+ }
+ }
+
+ if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+ {
+ pWait->fIndefinite = true;
+ pWait->fHighRes = false;
+ pWait->uNsAbsTimeout = UINT64_MAX;
+ pWait->cNsRelTimeout = UINT64_MAX;
+ pWait->u.lTimeout = LONG_MAX;
+ }
+
+ pWait->fWantWakeup = false;
+ pWait->fTimedOut = false;
+ pWait->fInterrupted = false;
+ pWait->fInterruptible = !!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
+ pWait->pThread = curthread;
+ pWait->pvMtx = NULL;
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ pWait->idCy = CYCLIC_NONE;
+#endif
+
+ return VINF_SUCCESS;
+}
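+
+/*
+ * Illustrative calling pattern for these helpers, mirroring the wait workers
+ * in the semaphore code; fConditionMet, pfState and fCurState stand in for
+ * the caller's own state and are hypothetical placeholders.
+ *
+ *     // With the object mutex entered and the wait condition already checked:
+ *     RTR0SEMSOLWAIT Wait;
+ *     int rc = rtR0SemSolWaitInit(&Wait, fFlags, uTimeout);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         for (;;)
+ *         {
+ *             if (fConditionMet)                            rc = VINF_SUCCESS;
+ *             else if (rtR0SemSolWaitHasTimedOut(&Wait))    rc = VERR_TIMEOUT;
+ *             else if (rtR0SemSolWaitWasInterrupted(&Wait)) rc = VERR_INTERRUPTED;
+ *             else
+ *             {
+ *                 rtR0SemSolWaitDoIt(&Wait, pCnd, pMtx, pfState, fCurState);
+ *                 continue;
+ *             }
+ *             break;
+ *         }
+ *         rtR0SemSolWaitDelete(&Wait);
+ *     }
+ */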
+
+
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+/**
+ * Cyclic timeout callback that sets the timeout indicator and wakes up the
+ * waiting thread.
+ *
+ * @param pvUser The wait structure.
+ */
+static void rtR0SemSolWaitHighResTimeout(void *pvUser)
+{
+ PRTR0SEMSOLWAIT pWait = (PRTR0SEMSOLWAIT)pvUser;
+ kthread_t *pThread = pWait->pThread;
+ kmutex_t *pMtx = (kmutex_t *)ASMAtomicReadPtr(&pWait->pvMtx);
+ if (VALID_PTR(pMtx))
+ {
+ /* Enter the mutex here to make sure the thread has gone to sleep
+ before we wake it up.
+ Note: Trying to take the cpu_lock here doesn't work. */
+ mutex_enter(pMtx);
+ if (mutex_owner(&cpu_lock) == curthread)
+ {
+ cyclic_remove(pWait->idCy);
+ pWait->idCy = CYCLIC_NONE;
+ }
+ bool const fWantWakeup = pWait->fWantWakeup;
+ ASMAtomicWriteBool(&pWait->fTimedOut, true);
+ mutex_exit(pMtx);
+
+ if (fWantWakeup)
+ setrun(pThread);
+ }
+}
+#endif
+
+
+/**
+ * Timeout callback that sets the timeout indicator and wakes up the waiting
+ * thread.
+ *
+ * @param pvUser The wait structure.
+ */
+static void rtR0SemSolWaitTimeout(void *pvUser)
+{
+ PRTR0SEMSOLWAIT pWait = (PRTR0SEMSOLWAIT)pvUser;
+ kthread_t *pThread = pWait->pThread;
+ kmutex_t *pMtx = (kmutex_t *)ASMAtomicReadPtr((void * volatile *)&pWait->pvMtx);
+ if (VALID_PTR(pMtx))
+ {
+ /* Enter the mutex here to make sure the thread has gone to sleep
+ before we wake it up. */
+ mutex_enter(pMtx);
+ bool const fWantWakeup = pWait->fWantWakeup;
+ ASMAtomicWriteBool(&pWait->fTimedOut, true);
+ mutex_exit(pMtx);
+
+ if (fWantWakeup)
+ setrun(pThread);
+ }
+}
+
+
+/**
+ * Do the actual wait.
+ *
+ * @param pWait The wait structure.
+ * @param pCnd The condition variable to wait on.
+ * @param pMtx The mutex related to the condition variable.
+ * The caller has entered this.
+ * @param pfState The state variable to check for changes after
+ * temporarily leaving the mutex (spinlock).
+ * @param fCurState The current value of @a pfState. We'll return
+ * without sleeping if @a pfState doesn't hold
+ * this value after reacquiring the mutex.
+ *
+ * @remarks This must be called with the object mutex (spinlock) held.
+ */
+DECLINLINE(void) rtR0SemSolWaitDoIt(PRTR0SEMSOLWAIT pWait, kcondvar_t *pCnd, kmutex_t *pMtx,
+ uint32_t volatile *pfState, uint32_t const fCurState)
+{
+ union
+ {
+ callout_id_t idCo;
+ timeout_id_t idTom;
+ } u;
+
+ /*
+ * Arm the timeout callback.
+ *
+ * We will have to leave the mutex (spinlock) when doing this because S10
+ * (didn't check S11) will not correctly preserve PIL across calls to
+ * timeout_generic() - @bugref{5595}. We do it for all timeout methods to
+ * be on the safe side, the nice sideeffect of which is that it solves the
+ * lock inversion problem found in @bugref{5342}.
+ */
+ bool const fHasTimeout = !pWait->fIndefinite;
+ bool fGoToSleep = !fHasTimeout;
+ if (fHasTimeout)
+ {
+ pWait->fWantWakeup = false; /* only want fTimedOut */
+ ASMAtomicWritePtr(&pWait->pvMtx, pMtx); /* atomic is paranoia */
+ mutex_exit(pMtx);
+
+ if (pWait->fHighRes)
+ {
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ if (g_pfnrtR0Sol_timeout_generic != NULL)
+#endif
+ {
+ /*
+ * High resolution timeout - arm a high resolution timeout callback
+ * for waking up the thread at the desired time.
+ */
+ u.idCo = g_pfnrtR0Sol_timeout_generic(CALLOUT_REALTIME, rtR0SemSolWaitTimeout, pWait,
+ pWait->uNsAbsTimeout, RTR0SEMSOLWAIT_RESOLUTION,
+ CALLOUT_FLAG_ABSOLUTE);
+ }
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ else
+ {
+ /*
+ * High resolution timeout - arm a one-shot cyclic for waking up
+ * the thread at the desired time.
+ */
+ cyc_handler_t Cyh;
+ Cyh.cyh_arg = pWait;
+ Cyh.cyh_func = rtR0SemSolWaitHighResTimeout;
+ Cyh.cyh_level = CY_LOW_LEVEL; /// @todo try CY_LOCK_LEVEL and CY_HIGH_LEVEL?
+
+ cyc_time_t Cyt;
+ Cyt.cyt_when = pWait->uNsAbsTimeout;
+ Cyt.cyt_interval = UINT64_C(1000000000) * 60;
+
+ mutex_enter(&cpu_lock);
+ pWait->idCy = cyclic_add(&Cyh, &Cyt);
+ mutex_exit(&cpu_lock);
+ }
+#endif
+ }
+ else
+ {
+ /*
+ * Normal timeout.
+ * We're better off with our own callback, as on the timeout man page,
+ * than calling cv_timedwait[_sig]().
+ */
+ u.idTom = realtime_timeout(rtR0SemSolWaitTimeout, pWait, pWait->u.lTimeout);
+ }
+
+ /*
+ * Reacquire the mutex and check if the sleep condition still holds and
+ * that we didn't already time out.
+ */
+ mutex_enter(pMtx);
+ pWait->fWantWakeup = true;
+ fGoToSleep = !ASMAtomicUoReadBool(&pWait->fTimedOut)
+ && ASMAtomicReadU32(pfState) == fCurState;
+ }
+
+ /*
+ * Do the waiting if that's still desirable.
+ * (rc > 0 - normal wake-up; rc == 0 - interruption; rc == -1 - timeout)
+ */
+ if (fGoToSleep)
+ {
+ if (pWait->fInterruptible)
+ {
+ int rc = cv_wait_sig(pCnd, pMtx);
+ if (RT_UNLIKELY(rc <= 0))
+ {
+ if (RT_LIKELY(rc == 0))
+ pWait->fInterrupted = true;
+ else
+ AssertMsgFailed(("rc=%d\n", rc)); /* no timeouts, see above! */
+ }
+ }
+ else
+ cv_wait(pCnd, pMtx);
+ }
+
+ /*
+ * Remove the timeout callback. Drop the lock while we're doing that
+ * to reduce lock contention / deadlocks. Before dropping the lock,
+ * indicate that the callback shouldn't do anything.
+ *
+ * (Too bad we are stuck with the cv_* API here, it's doing a little
+ * bit too much.)
+ */
+ if (fHasTimeout)
+ {
+ pWait->fWantWakeup = false;
+ ASMAtomicWritePtr(&pWait->pvMtx, NULL);
+ mutex_exit(pMtx);
+
+ if (pWait->fHighRes)
+ {
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ if (g_pfnrtR0Sol_timeout_generic != NULL)
+#endif
+ g_pfnrtR0Sol_untimeout_generic(u.idCo, 0 /*nowait*/);
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ else
+ {
+ mutex_enter(&cpu_lock);
+ if (pWait->idCy != CYCLIC_NONE)
+ {
+ cyclic_remove(pWait->idCy);
+ pWait->idCy = CYCLIC_NONE;
+ }
+ mutex_exit(&cpu_lock);
+ }
+#endif
+ }
+ else
+ untimeout(u.idTom);
+
+ mutex_enter(pMtx);
+ }
+}
+
+
+/**
+ * Checks if a Solaris wait was interrupted.
+ *
+ * @returns true / false
+ * @param pWait The wait structure.
+ * @remarks This shall be called before the first rtR0SemSolWaitDoIt().
+ */
+DECLINLINE(bool) rtR0SemSolWaitWasInterrupted(PRTR0SEMSOLWAIT pWait)
+{
+ return pWait->fInterrupted;
+}
+
+
+/**
+ * Checks if a Solaris wait has timed out.
+ *
+ * @returns true / false
+ * @param pWait The wait structure.
+ */
+DECLINLINE(bool) rtR0SemSolWaitHasTimedOut(PRTR0SEMSOLWAIT pWait)
+{
+ return pWait->fTimedOut;
+}
+
+
+/**
+ * Deletes a Solaris wait.
+ *
+ * @param pWait The wait structure.
+ */
+DECLINLINE(void) rtR0SemSolWaitDelete(PRTR0SEMSOLWAIT pWait)
+{
+ pWait->pThread = NULL;
+}
+
+
+/**
+ * Enters the mutex, unpinning the underlying current thread if contended and
+ * we're on an interrupt thread.
+ *
+ * The unpinning is done to prevent a deadlock (see @bugref{4259} for the
+ * full explanation).
+ *
+ * @param pMtx The mutex to enter.
+ */
+DECLINLINE(void) rtR0SemSolWaitEnterMutexWithUnpinningHack(kmutex_t *pMtx)
+{
+ int fAcquired = mutex_tryenter(pMtx);
+ if (!fAcquired)
+ {
+ /*
+ * Note! This assumes nobody is using the RTThreadPreemptDisable() in an
+ * interrupt context and expects it to work right. The swtch will
+ * result in a voluntary preemption. To fix this, we would have to
+ * do our own counting in RTThreadPreemptDisable/Restore() like we do
+ * on systems which doesn't do preemption (OS/2, linux, ...) and
+ * check whether preemption was disabled via RTThreadPreemptDisable()
+ * or not and only call swtch if RTThreadPreemptDisable() wasn't called.
+ */
+ kthread_t **ppIntrThread = SOL_THREAD_TINTR_PTR;
+ if ( *ppIntrThread
+ && getpil() < DISP_LEVEL)
+ {
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RTThreadPreemptDisable(&PreemptState);
+ preempt();
+ RTThreadPreemptRestore(&PreemptState);
+ }
+ mutex_enter(pMtx);
+ }
+}
+
+
+/**
+ * Gets the max resolution of the timeout machinery.
+ *
+ * @returns Resolution specified in nanoseconds.
+ */
+DECLINLINE(uint32_t) rtR0SemSolWaitGetResolution(void)
+{
+ return g_pfnrtR0Sol_timeout_generic != NULL
+ ? RTR0SEMSOLWAIT_RESOLUTION
+ : cyclic_getres();
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_solaris_semeventwait_r0drv_solaris_h */
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c
new file mode 100644
index 00000000..d2fa43d3
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c
@@ -0,0 +1,120 @@
+/* $Id: semfastmutex-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the Solaris rwlock used to implement a fast mutex.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+ /** Magic value (RTSEMFASTMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** The Solaris rwlock (always entered exclusively, i.e. as a writer). */
+ krwlock_t Mtx;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+ AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+ AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+ RT_ASSERT_PREEMPTIBLE();
+
+ PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+ rw_init(&pThis->Mtx, "RWLOCK", RW_DRIVER, NULL);
+
+ *phFastMtx = pThis;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ if (pThis == NIL_RTSEMFASTMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+
+ ASMAtomicXchgU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+ rw_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ RT_ASSERT_PREEMPTIBLE();
+
+ rw_enter(&pThis->Mtx, RW_WRITER);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+
+ rw_exit(&pThis->Mtx);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c
new file mode 100644
index 00000000..6a3b4576
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c
@@ -0,0 +1,387 @@
+/* $Id: semmutex-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/list.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Solaris mutex semaphore, implemented with a kmutex_t and a kcondvar_t.
+ */
+typedef struct RTSEMMUTEXINTERNAL
+{
+ /** Magic value (RTSEMMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** The number of recursions. */
+ uint32_t cRecursions;
+ /** The number of threads waiting for the mutex. */
+ uint32_t volatile cWaiters;
+ /** The number of threads referencing us. */
+ uint32_t volatile cRefs;
+ /** The owner thread, NIL_RTNATIVETHREAD if none. */
+ RTNATIVETHREAD hOwnerThread;
+ /** The mutex object for synchronization. */
+ kmutex_t Mtx;
+ /** The condition variable for synchronization. */
+ kcondvar_t Cnd;
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMtx)
+{
+ /*
+ * Allocate.
+ */
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (RT_UNLIKELY(!pThis))
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize.
+ */
+ pThis->u32Magic = RTSEMMUTEX_MAGIC;
+ pThis->cRecursions = 0;
+ pThis->cWaiters = 0;
+ pThis->cRefs = 1;
+ pThis->hOwnerThread = NIL_RTNATIVETHREAD;
+ mutex_init(&pThis->Mtx, "IPRT Mutex", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
+ cv_init(&pThis->Cnd, "IPRT CVM", CV_DRIVER, NULL);
+ *phMtx = pThis;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMtx)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMtx;
+
+ /*
+ * Validate.
+ */
+ if (pThis == NIL_RTSEMMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+ mutex_enter(&pThis->Mtx);
+
+ ASMAtomicDecU32(&pThis->cRefs);
+
+ /*
+ * Invalidate the magic to indicate the mutex is being destroyed.
+ */
+ ASMAtomicIncU32(&pThis->u32Magic);
+ if (pThis->cWaiters > 0)
+ {
+ /*
+ * Wake up all waiters, last waiter thread cleans up.
+ */
+ cv_broadcast(&pThis->Cnd);
+ mutex_exit(&pThis->Mtx);
+ }
+ else if (pThis->cRefs == 0)
+ {
+ /*
+ * No waiters and the last reference is gone, destroy.
+ */
+ mutex_exit(&pThis->Mtx);
+ cv_destroy(&pThis->Cnd);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+ }
+ else
+ {
+ /*
+ * Someone else still holds a reference and will clean up when it is released. Just relinquish & bail.
+ */
+ mutex_exit(&pThis->Mtx);
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
+ *
+ * @returns VINF_SUCCESS, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
+ * Returns without owning the mutex.
+ * @param pThis The mutex instance.
+ * @param cMillies The timeout, must be > 0 or RT_INDEFINITE_WAIT.
+ * @param fInterruptible The wait type.
+ *
+ * @remarks This needs to be called with the mutex object held!
+ */
+static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
+ bool fInterruptible)
+{
+ int rc = VERR_GENERAL_FAILURE;
+ Assert(cMillies > 0);
+
+ /*
+ * Now we wait (sleep; although it might spin and then sleep) & reference the mutex.
+ */
+ ASMAtomicIncU32(&pThis->cWaiters);
+ ASMAtomicIncU32(&pThis->cRefs);
+
+ if (cMillies != RT_INDEFINITE_WAIT)
+ {
+ clock_t cTicks = drv_usectohz((clock_t)(cMillies * 1000L));
+ clock_t cTimeout = ddi_get_lbolt();
+ cTimeout += cTicks;
+ if (fInterruptible)
+ rc = cv_timedwait_sig(&pThis->Cnd, &pThis->Mtx, cTimeout);
+ else
+ rc = cv_timedwait(&pThis->Cnd, &pThis->Mtx, cTimeout);
+ }
+ else
+ {
+ if (fInterruptible)
+ rc = cv_wait_sig(&pThis->Cnd, &pThis->Mtx);
+ else
+ {
+ cv_wait(&pThis->Cnd, &pThis->Mtx);
+ rc = 1;
+ }
+ }
+
+ ASMAtomicDecU32(&pThis->cWaiters);
+ if (rc > 0)
+ {
+ if (pThis->u32Magic == RTSEMMUTEX_MAGIC)
+ {
+ if (pThis->hOwnerThread == NIL_RTNATIVETHREAD)
+ {
+ /*
+ * Woken up by a release from another thread.
+ */
+ Assert(pThis->cRecursions == 0);
+ pThis->cRecursions = 1;
+ pThis->hOwnerThread = RTThreadNativeSelf();
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ /*
+ * Interrupted by some signal.
+ */
+ rc = VERR_INTERRUPTED;
+ }
+ }
+ else
+ {
+ /*
+ * Awakened due to the destruction-in-progress broadcast.
+ * We will cleanup if we're the last waiter.
+ */
+ rc = VERR_SEM_DESTROYED;
+ }
+ }
+ else if (rc == -1)
+ {
+ /*
+ * Timed out.
+ */
+ rc = VERR_TIMEOUT;
+ }
+ else
+ {
+ /*
+ * Condition may not have been met, returned due to pending signal.
+ */
+ rc = VERR_INTERRUPTED;
+ }
+
+ if (!ASMAtomicDecU32(&pThis->cRefs))
+ {
+ Assert(RT_FAILURE_NP(rc));
+ mutex_exit(&pThis->Mtx);
+ cv_destroy(&pThis->Cnd);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+ return rc;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Internal worker.
+ */
+DECLINLINE(int) rtSemMutexSolRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ int rc = VERR_GENERAL_FAILURE;
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs >= 1);
+
+ /*
+ * Lock it and check if it's a recursion.
+ */
+ mutex_enter(&pThis->Mtx);
+ if (pThis->hOwnerThread == RTThreadNativeSelf())
+ {
+ pThis->cRecursions++;
+ Assert(pThis->cRecursions > 1);
+ Assert(pThis->cRecursions < 256);
+ rc = VINF_SUCCESS;
+ }
+ /*
+ * Not a recursion; claim the unowned mutex if there are no waiters.
+ */
+ else if ( pThis->hOwnerThread == NIL_RTNATIVETHREAD
+ && pThis->cWaiters == 0)
+ {
+ pThis->cRecursions = 1;
+ pThis->hOwnerThread = RTThreadNativeSelf();
+ rc = VINF_SUCCESS;
+ }
+ /*
+ * A polling call?
+ */
+ else if (cMillies == 0)
+ rc = VERR_TIMEOUT;
+ /*
+ * No, we really need to get to sleep.
+ */
+ else
+ rc = rtSemMutexSolRequestSleep(pThis, cMillies, fInterruptible);
+
+ mutex_exit(&pThis->Mtx);
+ return rc;
+}
+
+
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexSolRequest(hMutexSem, cMillies, false /*fInterruptible*/);
+}
+
+
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexSolRequest(hMutexSem, cMillies, true /*fInterruptible*/);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMtx)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMtx;
+ int rc;
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+ /*
+ * Take the lock and release one recursion.
+ */
+ mutex_enter(&pThis->Mtx);
+ if (pThis->hOwnerThread == RTThreadNativeSelf())
+ {
+ Assert(pThis->cRecursions > 0);
+ if (--pThis->cRecursions == 0)
+ {
+ pThis->hOwnerThread = NIL_RTNATIVETHREAD;
+
+ /*
+ * If there are any waiters, signal one of them.
+ */
+ if (pThis->cWaiters > 0)
+ cv_signal(&pThis->Cnd);
+ }
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NOT_OWNER;
+
+ mutex_exit(&pThis->Mtx);
+ return rc;
+}
+
+
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ bool fOwned = false;
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, false);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), false);
+
+ /*
+ * Check whether anyone currently owns the mutex.
+ */
+ mutex_enter(&pThis->Mtx);
+ fOwned = pThis->hOwnerThread != NIL_RTNATIVETHREAD;
+ mutex_exit(&pThis->Mtx);
+
+ return fOwned;
+}
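+
+/*
+ * Illustrative usage sketch of the recursive mutex implemented above,
+ * assuming standard IPRT semantics; doWork() is a hypothetical placeholder
+ * and error handling is elided.
+ *
+ *     RTSEMMUTEX hMtx;
+ *     if (RT_SUCCESS(RTSemMutexCreate(&hMtx)))
+ *     {
+ *         if (RT_SUCCESS(RTSemMutexRequest(hMtx, RT_INDEFINITE_WAIT)))
+ *         {
+ *             doWork();
+ *             RTSemMutexRelease(hMtx);
+ *         }
+ *         RTSemMutexDestroy(hMtx);
+ *     }
+ */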
+
diff --git a/src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c
new file mode 100644
index 00000000..4e0f2d1b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c
@@ -0,0 +1,204 @@
+/* $Id: spinlock-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/spinlock.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the Solaris kmutex_t spin mutex.
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+ /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** Spinlock creation flags. */
+ uint32_t fFlags;
+ /** Saved interrupt flag. */
+ uint32_t volatile fIntSaved;
+ /** A Solaris spinlock. */
+ kmutex_t Mtx;
+#ifdef RT_MORE_STRICT
+ /** The idAssertCpu variable before acquiring the lock, for asserting after
+ * releasing the spinlock. */
+ RTCPUID volatile idAssertCpu;
+ /** The CPU that owns the lock. */
+ RTCPUID volatile idCpuOwner;
+#endif
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+ RT_ASSERT_PREEMPTIBLE();
+ AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+
+ /*
+ * Allocate.
+ */
+ AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize & return.
+ */
+ pThis->u32Magic = RTSPINLOCK_MAGIC;
+ pThis->fFlags = fFlags;
+ pThis->fIntSaved = 0;
+ /** @todo Consider a different PIL when the interrupt-safe requirement is absent. */
+ mutex_init(&pThis->Mtx, "IPRT Spinlock", MUTEX_SPIN, (void *)ipltospl(PIL_MAX));
+ *pSpinlock = pThis;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+ /*
+ * Validate input.
+ */
+ RT_ASSERT_INTS_ON();
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
+ ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Make the lock invalid and release the memory.
+ */
+ ASMAtomicIncU32(&pThis->u32Magic);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+ AssertPtr(pThis);
+ Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ uint32_t fIntSaved = ASMIntDisableFlags();
+#endif
+ mutex_enter(&pThis->Mtx);
+
+ /*
+ * Solaris 10 doesn't preserve the interrupt flag, but since we're at PIL_MAX we should be
+ * fine and not get interrupts while the lock is held. Re-disable interrupts to not upset
+ * assertions & assumptions callers might have.
+ */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ ASMIntDisable();
+#endif
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ Assert(!ASMIntAreEnabled());
+#endif
+ pThis->fIntSaved = fIntSaved;
+ }
+ else
+ {
+#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+ bool fIntsOn = ASMIntAreEnabled();
+#endif
+
+ mutex_enter(&pThis->Mtx);
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
+#endif
+ }
+
+ RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
+}
+
+
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();
+
+ AssertPtr(pThis);
+ Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+ RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);
+
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ uint32_t fIntSaved = pThis->fIntSaved;
+ pThis->fIntSaved = 0;
+#endif
+ mutex_exit(&pThis->Mtx);
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ ASMSetFlags(fIntSaved);
+#endif
+ }
+ else
+ {
+#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+ bool fIntsOn = ASMIntAreEnabled();
+#endif
+
+ mutex_exit(&pThis->Mtx);
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
+#endif
+ }
+
+ RT_ASSERT_PREEMPT_CPUID();
+}
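+
+/*
+ * Illustrative usage sketch, assuming standard IPRT semantics; doWork() is a
+ * hypothetical placeholder.
+ *
+ *     RTSPINLOCK hSpinlock;
+ *     if (RT_SUCCESS(RTSpinlockCreate(&hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "example")))
+ *     {
+ *         RTSpinlockAcquire(hSpinlock);
+ *         doWork(); // keep this short - interrupts are disabled at PIL_MAX
+ *         RTSpinlockRelease(hSpinlock);
+ *         RTSpinlockDestroy(hSpinlock);
+ *     }
+ */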
+
diff --git a/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h b/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h
new file mode 100644
index 00000000..4a69c979
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h
@@ -0,0 +1,234 @@
+/* $Id: the-solaris-kernel.h $ */
+/** @file
+ * IPRT - Include all necessary headers for the Solaris kernel.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_solaris_the_solaris_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_solaris_the_solaris_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/thread.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/sdt.h>
+#include <sys/schedctl.h>
+#include <sys/time.h>
+#include <sys/sysmacros.h>
+#include <sys/cmn_err.h>
+#include <sys/vmsystm.h>
+#include <sys/cyclic.h>
+#include <sys/class.h>
+#include <sys/cpuvar.h>
+#include <sys/archsystm.h>
+#include <sys/x_call.h> /* in platform dir */
+#include <sys/x86_archext.h>
+#include <vm/hat.h>
+#include <vm/seg_vn.h>
+#include <vm/seg_kmem.h>
+#include <vm/page.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/spl.h>
+#include <sys/archsystm.h>
+#include <sys/callo.h>
+#include <sys/kobj.h>
+#include <sys/ctf_api.h>
+#include <sys/modctl.h>
+#include <sys/proc.h>
+#include <sys/t_lock.h>
+
+#undef u /* /usr/include/sys/user.h:249:1 is where this is defined to (curproc->p_user). very cool. */
+
+#include <iprt/cdefs.h>
+#include <iprt/types.h>
+#include <iprt/dbg.h>
+
+RT_C_DECLS_BEGIN
+
+/* IPRT functions. */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t cbPhysHi, uint64_t *puPhys, size_t cb, uint64_t cbAlign, bool fContig);
+DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb);
+
+
+/* Solaris functions. */
+typedef callout_id_t (*PFNSOL_timeout_generic)(int type, void (*func)(void *),
+ void *arg, hrtime_t expiration,
+ hrtime_t resolution, int flags);
+typedef hrtime_t (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
+typedef int (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
+typedef void (*PFNSOL_contig_free)(void *addr, size_t size);
+typedef int (*PFNSOL_page_noreloc_supported)(size_t cbPageSize);
+
+/* IPRT globals. */
+extern bool g_frtSolSplSetsEIF;
+extern RTCPUSET g_rtMpSolCpuSet;
+extern PFNSOL_timeout_generic g_pfnrtR0Sol_timeout_generic;
+extern PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic;
+extern PFNSOL_cyclic_reprogram g_pfnrtR0Sol_cyclic_reprogram;
+extern PFNSOL_contig_free g_pfnrtR0Sol_contig_free;
+extern PFNSOL_page_noreloc_supported g_pfnrtR0Sol_page_noreloc_supported;
+extern size_t g_offrtSolThreadPreempt;
+extern size_t g_offrtSolThreadIntrThread;
+extern size_t g_offrtSolThreadLock;
+extern size_t g_offrtSolThreadProc;
+extern size_t g_offrtSolThreadId;
+extern size_t g_offrtSolCpuPreempt;
+extern size_t g_offrtSolCpuForceKernelPreempt;
+extern bool g_frtSolInitDone;
+extern RTDBGKRNLINFO g_hKrnlDbgInfo;
+
+/*
+ * Workarounds for running on old versions of Solaris with different cross call
+ * interfaces. If we find xc_init_cpu() in the kernel, then just use the
+ * defined interfaces for xc_call() from the include file, where the xc_call()
+ * interface just takes a pointer to a ulong_t array. The array must be long
+ * enough to hold "ncpus" bits at runtime.
+ *
+ * The reason for the hacks is that using the type "cpuset_t" is pretty much
+ * impossible from code built outside the Solaris source repository that wants
+ * to run on multiple releases of Solaris.
+ *
+ * For old style xc_call()s, 32-bit Solaris and older 64-bit versions use
+ * "ulong_t" as cpuset_t.
+ *
+ * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
+ * where "x" depends on NCPU.
+ *
+ * We detect the difference in 64 bit support by checking the kernel value of
+ * max_cpuid, which always holds the compiled value of NCPU - 1.
+ *
+ * If Solaris increases NCPU to more than 256, VBox will continue to work on
+ * all versions of Solaris as long as the number of installed CPUs in the
+ * machine is <= IPRT_SOL_NCPUS. If IPRT_SOL_NCPUS is increased, this
+ * code has to be rewritten somewhat to provide compatibility with older Solaris
+ * which expects cpuset_t to be based on NCPU==256 -- or we discontinue
+ * support of old Nevada/S10.
+ */
+#define IPRT_SOL_NCPUS 256
+#define IPRT_SOL_SET_WORDS (IPRT_SOL_NCPUS / (sizeof(ulong_t) * 8))
+#define IPRT_SOL_X_CALL_HIPRI (2) /* for Old Solaris interface */
+typedef struct RTSOLCPUSET
+{
+ ulong_t auCpus[IPRT_SOL_SET_WORDS];
+} RTSOLCPUSET;
+typedef RTSOLCPUSET *PRTSOLCPUSET;
+
+/* Avoid warnings even if it means more typing... */
+typedef struct RTR0FNSOLXCCALL
+{
+ union
+ {
+ void *(*pfnSol_xc_call) (xc_arg_t, xc_arg_t, xc_arg_t, ulong_t *, xc_func_t);
+ void *(*pfnSol_xc_call_old) (xc_arg_t, xc_arg_t, xc_arg_t, int, RTSOLCPUSET, xc_func_t);
+ void *(*pfnSol_xc_call_old_ulong)(xc_arg_t, xc_arg_t, xc_arg_t, int, ulong_t, xc_func_t);
+ } u;
+} RTR0FNSOLXCCALL;
+typedef RTR0FNSOLXCCALL *PRTR0FNSOLXCCALL;
+
+extern RTR0FNSOLXCCALL g_rtSolXcCall;
+extern bool g_frtSolOldIPI;
+extern bool g_frtSolOldIPIUlong;
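+
+/*
+ * Illustrative dispatch sketch: callers of g_rtSolXcCall are expected to pick
+ * the union member matching the detected kernel, roughly as below. uArg1..3,
+ * pfnWorker and the CPU set contents are hypothetical placeholders; the real
+ * dispatch lives in the MP code.
+ *
+ *     RTSOLCPUSET CpuSet; // one bit per target CPU, IPRT_SOL_NCPUS bits wide
+ *     if (!g_frtSolOldIPI)
+ *         g_rtSolXcCall.u.pfnSol_xc_call(uArg1, uArg2, uArg3, &CpuSet.auCpus[0], pfnWorker);
+ *     else if (g_frtSolOldIPIUlong)
+ *         g_rtSolXcCall.u.pfnSol_xc_call_old_ulong(uArg1, uArg2, uArg3, IPRT_SOL_X_CALL_HIPRI,
+ *                                                  CpuSet.auCpus[0], pfnWorker);
+ *     else
+ *         g_rtSolXcCall.u.pfnSol_xc_call_old(uArg1, uArg2, uArg3, IPRT_SOL_X_CALL_HIPRI,
+ *                                            CpuSet, pfnWorker);
+ */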
+
+/*
+ * Thread-context hooks.
+ * Workarounds for older Solaris versions that did not have the exitctx() callback.
+ */
+typedef struct RTR0FNSOLTHREADCTX
+{
+ union
+ {
+ void *(*pfnSol_installctx) (kthread_t *pThread, void *pvArg,
+ void (*pfnSave)(void *pvArg),
+ void (*pfnRestore)(void *pvArg),
+ void (*pfnFork)(void *pvThread, void *pvThreadFork),
+ void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
+ void (*pfnExit)(void *pvThread),
+ void (*pfnFree)(void *pvArg, int fIsExec));
+
+ void *(*pfnSol_installctx_old) (kthread_t *pThread, void *pvArg,
+ void (*pfnSave)(void *pvArg),
+ void (*pfnRestore)(void *pvArg),
+ void (*pfnFork)(void *pvThread, void *pvThreadFork),
+ void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
+ void (*pfnFree)(void *pvArg, int fIsExec));
+ } Install;
+
+ union
+ {
+ int (*pfnSol_removectx) (kthread_t *pThread, void *pvArg,
+ void (*pfnSave)(void *pvArg),
+ void (*pfnRestore)(void *pvArg),
+ void (*pfnFork)(void *pvThread, void *pvThreadFork),
+ void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
+ void (*pfnExit)(void *pvThread),
+ void (*pfnFree)(void *pvArg, int fIsExec));
+
+ int (*pfnSol_removectx_old) (kthread_t *pThread, void *pvArg,
+ void (*pfnSave)(void *pvArg),
+ void (*pfnRestore)(void *pvArg),
+ void (*pfnFork)(void *pvThread, void *pvThreadFork),
+ void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
+ void (*pfnFree)(void *pvArg, int fIsExec));
+ } Remove;
+} RTR0FNSOLTHREADCTX;
+typedef RTR0FNSOLTHREADCTX *PRTR0FNSOLTHREADCTX;
+
+extern RTR0FNSOLTHREADCTX g_rtSolThreadCtx;
+extern bool g_frtSolOldThreadCtx;
+
+/*
+ * Workaround for older Solaris versions which called map_addr()/choose_addr()/
+ * map_addr_proc() with an 'alignment' argument that was removed in Solaris
+ * 11.4.
+ */
+typedef struct RTR0FNSOLMAPADDR
+{
+ union
+ {
+ void *(*pfnSol_map_addr) (caddr_t *, size_t, offset_t, uint_t);
+ void *(*pfnSol_map_addr_old) (caddr_t *, size_t, offset_t, int, uint_t);
+ } u;
+} RTR0FNSOLMAPADDR;
+typedef RTR0FNSOLMAPADDR *PRTR0FNSOLMAPADDR;
+
+extern RTR0FNSOLMAPADDR g_rtSolMapAddr;
+extern bool g_frtSolOldMapAddr;
+
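+/*
+ * Illustrative sketch only (hypothetical call site, not part of this header):
+ * how the map_addr wrapper is typically invoked depending on whether the
+ * running kernel still expects the pre-11.4 'alignment' argument. The
+ * argument values are placeholders.
+ */
+#if 0
+static void rtSolMapAddrExample(caddr_t *ppAddr, size_t cb, uint_t fFlags)
+{
+    if (g_frtSolOldMapAddr)
+        g_rtSolMapAddr.u.pfnSol_map_addr_old(ppAddr, cb, 0 /*offset*/, 0 /*alignment*/, fFlags);
+    else
+        g_rtSolMapAddr.u.pfnSol_map_addr(ppAddr, cb, 0 /*offset*/, fFlags);
+}
+#endif
+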
+/* Solaris globals. */
+extern uintptr_t kernelbase;
+
+/* Misc stuff from newer kernels. */
+#ifndef CALLOUT_FLAG_ABSOLUTE
+# define CALLOUT_FLAG_ABSOLUTE 2
+#endif
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_solaris_the_solaris_kernel_h */
+
diff --git a/src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c
new file mode 100644
index 00000000..a7782046
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c
@@ -0,0 +1,185 @@
+/* $Id: thread-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Threads, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+
+#define SOL_THREAD_PREEMPT (*((char *)curthread + g_offrtSolThreadPreempt))
+#define SOL_CPU_RUNRUN (*((char *)CPU + g_offrtSolCpuPreempt))
+#define SOL_CPU_KPRUNRUN (*((char *)CPU + g_offrtSolCpuForceKernelPreempt))
+
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+ return (RTNATIVETHREAD)curthread;
+}
+
+
+static int rtR0ThreadSolSleepCommon(RTMSINTERVAL cMillies)
+{
+ clock_t cTicks;
+ RT_ASSERT_PREEMPTIBLE();
+
+ if (!cMillies)
+ {
+ RTThreadYield();
+ return VINF_SUCCESS;
+ }
+
+ if (cMillies != RT_INDEFINITE_WAIT)
+ cTicks = drv_usectohz((clock_t)(cMillies * 1000L));
+ else
+ cTicks = 0;
+
+ delay(cTicks);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadSolSleepCommon(cMillies);
+}
+
+
+RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadSolSleepCommon(cMillies);
+}
+
+
+RTDECL(bool) RTThreadYield(void)
+{
+ RT_ASSERT_PREEMPTIBLE();
+
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RTThreadPreemptDisable(&PreemptState);
+
+ char const bThreadPreempt = SOL_THREAD_PREEMPT;
+ char const bForcePreempt = SOL_CPU_KPRUNRUN;
+ bool fWillYield = false;
+ Assert(bThreadPreempt >= 1);
+
+    /*
+     * Only if we are the last preemption enabler for this thread and force
+     * preemption is set on the CPU are we guaranteed to be preempted.
+     */
+ if (bThreadPreempt == 1 && bForcePreempt != 0)
+ fWillYield = true;
+
+ RTThreadPreemptRestore(&PreemptState);
+ return fWillYield;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+ if (RT_UNLIKELY(g_frtSolInitDone == false))
+ {
+ cmn_err(CE_CONT, "!RTThreadPreemptIsEnabled called before RTR0Init!\n");
+ return true;
+ }
+
+ bool fThreadPreempt = false;
+ if (SOL_THREAD_PREEMPT == 0)
+ fThreadPreempt = true;
+
+ if (!fThreadPreempt)
+ return false;
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ if (!ASMIntAreEnabled())
+ return false;
+#endif
+ if (getpil() >= DISP_LEVEL)
+ return false;
+ return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+
+ char const bPreempt = SOL_CPU_RUNRUN;
+ char const bForcePreempt = SOL_CPU_KPRUNRUN;
+ return (bPreempt != 0 || bForcePreempt != 0);
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+ /* yes, RTThreadPreemptIsPending is reliable. */
+ return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+ /* yes, kernel preemption is possible. */
+ return true;
+}
+
+
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+ AssertPtr(pState);
+
+ SOL_THREAD_PREEMPT++;
+ Assert(SOL_THREAD_PREEMPT >= 1);
+
+ RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+}
+
+
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+ AssertPtr(pState);
+ RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+
+ Assert(SOL_THREAD_PREEMPT >= 1);
+ if (--SOL_THREAD_PREEMPT == 0 && SOL_CPU_RUNRUN != 0)
+ kpreempt(KPREEMPT_SYNC);
+}
+
+
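+/*
+ * Illustrative usage sketch (hypothetical, not part of this file): the
+ * disable/restore pair must always be balanced on the same thread, exactly as
+ * RTThreadYield() does above. The work inside the critical region is a
+ * placeholder.
+ */
+#if 0
+static void rtSolPreemptExample(void)
+{
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+    /* ... work that must not be interrupted by a context switch ... */
+    RTThreadPreemptRestore(&PreemptState);
+}
+#endif
+
+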
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+ return servicing_interrupt() ? true : false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c
new file mode 100644
index 00000000..c70b7c3f
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c
@@ -0,0 +1,150 @@
+/* $Id: thread2-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+#include <iprt/process.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include "internal/thread.h"
+
+#define SOL_THREAD_ID_PTR ((uint64_t *)((char *)curthread + g_offrtSolThreadId))
+#define SOL_THREAD_LOCKP_PTR ((disp_lock_t **)((char *)curthread + g_offrtSolThreadLock))
+
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+ return rtThreadGetByNative(RTThreadNativeSelf());
+}
+
+
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+ int iPriority;
+ disp_lock_t **ppDispLock;
+ switch (enmType)
+ {
+ case RTTHREADTYPE_INFREQUENT_POLLER: iPriority = 60; break;
+ case RTTHREADTYPE_EMULATION: iPriority = 66; break;
+ case RTTHREADTYPE_DEFAULT: iPriority = 72; break;
+ case RTTHREADTYPE_MSG_PUMP: iPriority = 78; break;
+ case RTTHREADTYPE_IO: iPriority = 84; break;
+ case RTTHREADTYPE_TIMER: iPriority = 99; break;
+ default:
+ AssertMsgFailed(("enmType=%d\n", enmType));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ Assert(curthread);
+ thread_lock(curthread);
+ thread_change_pri(curthread, iPriority, 0);
+
+ /*
+ * thread_unlock() is a macro calling disp_lock_exit() with the thread's dispatcher lock.
+ * We need to dereference the offset manually here (for S10, S11 compatibility) rather than
+ * using the macro.
+ */
+ ppDispLock = SOL_THREAD_LOCKP_PTR;
+ disp_lock_exit(*ppDispLock);
+
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+ NOREF(pThread);
+    /* There is nothing special that needs doing here, but the
+       caller had better know what they are doing. */
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+ thread_join(pThread->tid);
+}
+
+
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+ NOREF(pThread);
+}
+
+
+/**
+ * Native thread main function.
+ *
+ * @param pvThreadInt The thread structure.
+ */
+static void rtThreadNativeMain(void *pvThreadInt)
+{
+ PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
+
+ AssertCompile(sizeof(kt_did_t) == sizeof(pThreadInt->tid));
+ uint64_t *pu64ThrId = SOL_THREAD_ID_PTR;
+ pThreadInt->tid = *pu64ThrId;
+ rtThreadMain(pThreadInt, RTThreadNativeSelf(), &pThreadInt->szName[0]);
+ thread_exit();
+}
+
+
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+ kthread_t *pThread;
+ RT_ASSERT_PREEMPTIBLE();
+
+ pThreadInt->tid = UINT64_MAX;
+
+ pThread = thread_create(NULL, /* Stack, use base */
+ 0, /* Stack size */
+ rtThreadNativeMain, /* Thread function */
+ pThreadInt, /* Function data */
+ 0, /* Data size */
+ &p0, /* Process 0 handle */
+ TS_RUN, /* Ready to run */
+ minclsyspri /* Priority */
+ );
+ if (RT_LIKELY(pThread))
+ {
+ *pNativeThread = (RTNATIVETHREAD)pThread;
+ return VINF_SUCCESS;
+ }
+
+ return VERR_OUT_OF_RESOURCES;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c
new file mode 100644
index 00000000..665549f0
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c
@@ -0,0 +1,349 @@
+/* $Id: threadctxhooks-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Thread Context Switching Hook, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2013-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/mem.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include "internal/thread.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal hook object for solaris.
+ */
+typedef struct RTTHREADCTXHOOKINT
+{
+ /** Magic value (RTTHREADCTXHOOKINT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The thread handle (owner) for which the context-hooks are registered. */
+ RTNATIVETHREAD hOwner;
+ /** Pointer to the registered callback function. */
+ PFNRTTHREADCTXHOOK pfnCallback;
+ /** User argument passed to the callback function. */
+ void *pvUser;
+ /** Whether the hook is enabled or not. */
+ bool volatile fEnabled;
+ /** Number of references to this object. */
+ uint32_t volatile cRefs;
+} RTTHREADCTXHOOKINT;
+typedef RTTHREADCTXHOOKINT *PRTTHREADCTXHOOKINT;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Validates a hook handle and returns rc if not valid. */
+#define RTTHREADCTX_VALID_RETURN_RC(pThis, rc) \
+ do { \
+ AssertPtrReturn((pThis), (rc)); \
+ AssertReturn((pThis)->u32Magic == RTTHREADCTXHOOKINT_MAGIC, (rc)); \
+ AssertReturn((pThis)->cRefs > 0, (rc)); \
+ } while (0)
+
+
+/**
+ * Hook function for the thread-save event.
+ *
+ * @param pvThreadCtxInt Opaque pointer to the internal hook object.
+ *
+ * @remarks Called with preemption disabled!
+ */
+static void rtThreadCtxHookSolOut(void *pvThreadCtxInt)
+{
+ PRTTHREADCTXHOOKINT pThis = (PRTTHREADCTXHOOKINT)pvThreadCtxInt;
+ AssertPtr(pThis);
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ Assert(pThis->cRefs > 0);
+
+ if (pThis->fEnabled)
+ {
+ Assert(pThis->pfnCallback);
+ pThis->pfnCallback(RTTHREADCTXEVENT_OUT, pThis->pvUser);
+ }
+}
+
+
+/**
+ * Hook function for the thread-restore event.
+ *
+ * @param pvThreadCtxInt Opaque pointer to the internal hook object.
+ *
+ * @remarks Called with preemption disabled!
+ */
+static void rtThreadCtxHookSolIn(void *pvThreadCtxInt)
+{
+ PRTTHREADCTXHOOKINT pThis = (PRTTHREADCTXHOOKINT)pvThreadCtxInt;
+ AssertPtr(pThis);
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ Assert(pThis->cRefs > 0);
+
+ if (pThis->fEnabled)
+ {
+ Assert(pThis->pfnCallback);
+ pThis->pfnCallback(RTTHREADCTXEVENT_IN, pThis->pvUser);
+ }
+}
+
+
+/**
+ * Hook function for the thread-free event.
+ *
+ * This is used for making sure the hook object is safely released - see
+ * RTThreadCtxHookDestroy for details.
+ *
+ * @param pvThreadCtxInt Opaque pointer to the internal hook object.
+ * @param fIsExec Whether this event is triggered due to exec().
+ */
+static void rtThreadCtxHookSolFree(void *pvThreadCtxInt, int fIsExec)
+{
+ PRTTHREADCTXHOOKINT pThis = (PRTTHREADCTXHOOKINT)pvThreadCtxInt;
+ AssertPtrReturnVoid(pThis);
+ AssertMsgReturnVoid(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis));
+
+ uint32_t cRefs = ASMAtomicReadU32(&pThis->cRefs);
+ if (cRefs > 0)
+ {
+ cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ if (!cRefs)
+ {
+ Assert(!pThis->fEnabled);
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
+ RTMemFree(pThis);
+ }
+ }
+ else
+ {
+ /* Should never happen. */
+ AssertMsgFailed(("rtThreadCtxHookSolFree with cRefs=0 pThis=%p\n", pThis));
+ }
+}
+
+
+RTDECL(int) RTThreadCtxHookCreate(PRTTHREADCTXHOOK phCtxHook, uint32_t fFlags, PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis;
+ Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertReturn(fFlags == 0, VERR_INVALID_FLAGS);
+
+ /*
+ * Allocate and initialize a new hook.
+ */
+ pThis = (PRTTHREADCTXHOOKINT)RTMemAllocZ(sizeof(*pThis));
+ if (RT_UNLIKELY(!pThis))
+ return VERR_NO_MEMORY;
+ pThis->u32Magic = RTTHREADCTXHOOKINT_MAGIC;
+ pThis->hOwner = RTThreadNativeSelf();
+ pThis->pfnCallback = pfnCallback;
+ pThis->pvUser = pvUser;
+ pThis->fEnabled = false;
+ pThis->cRefs = 2; /* One reference for the thread, one for the caller. */
+
+ /*
+     * installctx() allocates memory and thus cannot be used in RTThreadCtxHookEnable(), which may be called
+     * with preemption disabled. We install the context hooks here and use 'fEnabled' to determine whether we
+     * may invoke the consumer's hook or not.
+ */
+ if (g_frtSolOldThreadCtx)
+ {
+ g_rtSolThreadCtx.Install.pfnSol_installctx_old(curthread,
+ pThis,
+ rtThreadCtxHookSolOut, /* save */
+ rtThreadCtxHookSolIn, /* restore */
+ NULL, /* fork */
+ NULL, /* lwp_create */
+ rtThreadCtxHookSolFree);
+ }
+ else
+ {
+ g_rtSolThreadCtx.Install.pfnSol_installctx(curthread,
+ pThis,
+ rtThreadCtxHookSolOut, /* save */
+ rtThreadCtxHookSolIn, /* restore */
+ NULL, /* fork */
+ NULL, /* lwp_create */
+ NULL, /* exit */
+ rtThreadCtxHookSolFree);
+ }
+
+ *phCtxHook = pThis;
+ return VINF_SUCCESS;
+}
+
+
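+/*
+ * Illustrative usage sketch (hypothetical consumer, not part of this file):
+ * create the hook on the thread whose context switches we want to observe,
+ * enable it around the sensitive section, then destroy it from the same
+ * thread when done.
+ */
+#if 0
+static DECLCALLBACK(void) exampleCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
+{
+    NOREF(pvUser);
+    if (enmEvent == RTTHREADCTXEVENT_OUT)
+    {
+        /* The thread is being scheduled out: save per-thread state here. */
+    }
+    else if (enmEvent == RTTHREADCTXEVENT_IN)
+    {
+        /* The thread is being scheduled back in: restore per-thread state here. */
+    }
+}
+
+static int exampleUseCtxHook(void)
+{
+    RTTHREADCTXHOOK hHook;
+    int rc = RTThreadCtxHookCreate(&hHook, 0 /*fFlags*/, exampleCtxCallback, NULL /*pvUser*/);
+    if (RT_SUCCESS(rc))
+    {
+        RTThreadCtxHookEnable(hHook);
+        /* ... context-switch sensitive work ... */
+        RTThreadCtxHookDisable(hHook);
+        RTThreadCtxHookDestroy(hHook);
+    }
+    return rc;
+}
+#endif
+
+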
+RTDECL(int) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input, ignoring NIL.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ if (pThis == NIL_RTTHREADCTXHOOK)
+ return VINF_SUCCESS;
+ RTTHREADCTX_VALID_RETURN_RC(hCtxHook, VERR_INVALID_HANDLE);
+ Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ Assert(!pThis->fEnabled || pThis->hOwner == RTThreadNativeSelf());
+
+ /*
+ * Make sure it's disabled.
+ */
+ ASMAtomicWriteBool(&pThis->fEnabled, false);
+
+ /*
+ * Decrement.
+ */
+ uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ if ( cRefs == 1
+ && pThis->hOwner == RTThreadNativeSelf())
+ {
+ /*
+ * removectx() will invoke rtThreadCtxHookSolFree() and there is no way to bypass it and still use
+     * rtThreadCtxHookSolFree() at the same time. Hence the convoluted reference counting.
+ *
+ * When this function is called from the owner thread and is the last reference, we call removectx() which
+ * will invoke rtThreadCtxHookSolFree() with cRefs = 1 and that will then free the hook object.
+ *
+ * When the function is called from a different thread, we simply decrement the reference. Whenever the
+ * ring-0 thread dies, Solaris will call rtThreadCtxHookSolFree() which will free the hook object.
+ */
+ int rc;
+ if (g_frtSolOldThreadCtx)
+ {
+ rc = g_rtSolThreadCtx.Remove.pfnSol_removectx_old(curthread,
+ pThis,
+ rtThreadCtxHookSolOut, /* save */
+ rtThreadCtxHookSolIn, /* restore */
+ NULL, /* fork */
+ NULL, /* lwp_create */
+ rtThreadCtxHookSolFree);
+ }
+ else
+ {
+ rc = g_rtSolThreadCtx.Remove.pfnSol_removectx(curthread,
+ pThis,
+ rtThreadCtxHookSolOut, /* save */
+ rtThreadCtxHookSolIn, /* restore */
+ NULL, /* fork */
+ NULL, /* lwp_create */
+ NULL, /* exit */
+ rtThreadCtxHookSolFree);
+ }
+ AssertMsg(rc, ("removectx() failed. rc=%d\n", rc));
+ NOREF(rc);
+
+#if 0 /*def RT_STRICT - access after free */
+ cRefs = ASMAtomicReadU32(&pThis->cRefs);
+ Assert(!cRefs);
+#endif
+ cRefs = 0;
+ }
+ else if (!cRefs)
+ {
+ /*
+ * The ring-0 thread for this hook object has already died. Free up the object as we have no more references.
+ */
+ Assert(pThis->hOwner != RTThreadNativeSelf());
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
+ RTMemFree(pThis);
+ }
+
+ return cRefs;
+}
+
+
+RTDECL(int) RTThreadCtxHookEnable(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ VERR_INVALID_HANDLE);
+ Assert(pThis->hOwner == RTThreadNativeSelf());
+ Assert(!pThis->fEnabled);
+
+ /*
+ * Mark it as enabled.
+ */
+ pThis->fEnabled = true;
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTThreadCtxHookDisable(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ if (pThis != NIL_RTTHREADCTXHOOK)
+ {
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ VERR_INVALID_HANDLE);
+ Assert(pThis->hOwner == RTThreadNativeSelf());
+
+ /*
+ * Mark it as disabled.
+ */
+ pThis->fEnabled = false;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(bool) RTThreadCtxHookIsEnabled(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ if (pThis == NIL_RTTHREADCTXHOOK)
+ return false;
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ false);
+
+ return pThis->fEnabled;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c
new file mode 100644
index 00000000..b5514b47
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c
@@ -0,0 +1,70 @@
+/* $Id: time-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTTIME_INCL_TIMESPEC
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/time.h>
+
+
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+ return (uint64_t)gethrtime();
+}
+
+
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+ return RTTimeNanoTS() / RT_NS_1MS;
+}
+
+
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+ return RTTimeNanoTS();
+}
+
+
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+ return RTTimeNanoTS() / RT_NS_1MS;
+}
+
+
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+ timestruc_t TimeSpec;
+
+ mutex_enter(&tod_lock);
+ TimeSpec = tod_get();
+ mutex_exit(&tod_lock);
+ return RTTimeSpecSetNano(pTime, (uint64_t)TimeSpec.tv_sec * RT_NS_1SEC + TimeSpec.tv_nsec);
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c
new file mode 100644
index 00000000..8cb91cc6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c
@@ -0,0 +1,650 @@
+/* $Id: timer-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Timer, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/timer.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/spinlock.h>
+#include <iprt/time.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal representation of a Solaris timer handle.
+ */
+typedef struct RTTIMER
+{
+ /** Magic.
+ * This is RTTIMER_MAGIC, but changes to something else before the timer
+     * is destroyed, making it clear that the timer handle is no longer valid. */
+ uint32_t volatile u32Magic;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** Flag indicating that the timer is suspended (hCyclicId should be
+ * CYCLIC_NONE). */
+ bool volatile fSuspended;
+ /** Flag indicating that the timer was suspended from the timer callback and
+ * therefore the hCyclicId may still be valid. */
+ bool volatile fSuspendedFromTimer;
+ /** Flag indicating that the timer interval was changed and that it requires
+ * manual expiration time programming for each callout. */
+ bool volatile fIntervalChanged;
+ /** Whether the timer must run on all CPUs or not. */
+ uint8_t fAllCpus;
+ /** Whether the timer must run on a specific CPU or not. */
+ uint8_t fSpecificCpu;
+ /** The CPU it must run on if fSpecificCpu is set. */
+ uint32_t iCpu;
+ /** The nano second interval for repeating timers. */
+ uint64_t volatile cNsInterval;
+ /** Cyclic timer Id. This is CYCLIC_NONE if no active timer.
+ * @remarks Please keep in mind that cyclic may call us back before the
+     *          cyclic_add/cyclic_add_omni functions return, so don't use this
+ * unguarded with cyclic_reprogram. */
+ cyclic_id_t hCyclicId;
+ /** The user callback. */
+ PFNRTTIMER pfnTimer;
+ /** The argument for the user callback. */
+ void *pvUser;
+ /** Union with timer type specific data. */
+ union
+ {
+ /** Single timer (fAllCpus == false). */
+ struct
+ {
+ /** Timer ticks. */
+ uint64_t u64Tick;
+ /** The next tick when fIntervalChanged is true, otherwise 0. */
+ uint64_t nsNextTick;
+ /** The (interrupt) thread currently active in the callback. */
+ kthread_t * volatile pActiveThread;
+ } Single;
+
+ /** Omni timer (fAllCpus == true). */
+ struct
+ {
+ /** Absolute timestamp of when the timer should fire first when starting up. */
+ uint64_t u64When;
+ /** Array of per CPU data (variable size). */
+ struct
+ {
+ /** Timer ticks (reinitialized when online'd). */
+ uint64_t u64Tick;
+ /** The (interrupt) thread currently active in the callback. */
+ kthread_t * volatile pActiveThread;
+ /** The next tick when fIntervalChanged is true, otherwise 0. */
+ uint64_t nsNextTick;
+ } aPerCpu[1];
+ } Omni;
+ } u;
+} RTTIMER;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Validates that the timer is valid. */
+#define RTTIMER_ASSERT_VALID_RET(pTimer) \
+ do \
+ { \
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
+ AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
+ VERR_INVALID_HANDLE); \
+ } while (0)
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void rtTimerSolSingleCallbackWrapper(void *pvArg);
+static void rtTimerSolStopIt(PRTTIMER pTimer);
+
+
+/**
+ * Retains a reference to the timer.
+ *
+ * @returns New reference counter value.
+ * @param pTimer The timer.
+ */
+DECLINLINE(uint32_t) rtTimerSolRetain(PRTTIMER pTimer)
+{
+ return ASMAtomicIncU32(&pTimer->cRefs);
+}
+
+
+/**
+ * Destroys the timer when the reference counter has reached zero.
+ *
+ * @returns 0 (new references counter value).
+ * @param pTimer The timer.
+ */
+static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pTimer)
+{
+ Assert(pTimer->hCyclicId == CYCLIC_NONE);
+ ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
+ RTMemFree(pTimer);
+ return 0;
+}
+
+
+/**
+ * Releases a reference to the timer.
+ *
+ * @returns New reference counter value.
+ * @param pTimer The timer.
+ */
+DECLINLINE(uint32_t) rtTimerSolRelease(PRTTIMER pTimer)
+{
+ uint32_t cRefs = ASMAtomicDecU32(&pTimer->cRefs);
+ if (!cRefs)
+ return rtTimeSolReleaseCleanup(pTimer);
+ return cRefs;
+}
+
+
+/**
+ * Callback wrapper for single-CPU timers.
+ *
+ * @param pvArg Opaque pointer to the timer.
+ *
+ * @remarks This will be executed in interrupt context but only at the specified
+ *          level, i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
+ *          cyclic subsystem here, and neither should pfnTimer().
+ */
+static void rtTimerSolSingleCallbackWrapper(void *pvArg)
+{
+ PRTTIMER pTimer = (PRTTIMER)pvArg;
+ AssertPtrReturnVoid(pTimer);
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ Assert(!pTimer->fAllCpus);
+
+ /* Make sure one-shots do not fire another time. */
+ Assert( !pTimer->fSuspended
+ || pTimer->cNsInterval != 0);
+
+ if (!pTimer->fSuspendedFromTimer)
+ {
+ /* Make sure we are firing on the right CPU. */
+ Assert( !pTimer->fSpecificCpu
+ || pTimer->iCpu == RTMpCpuId());
+
+ /* For one-shot, we may allow the callback to restart them. */
+ if (pTimer->cNsInterval == 0)
+ pTimer->fSuspendedFromTimer = true;
+
+ /*
+ * Perform the callout.
+ */
+ pTimer->u.Single.pActiveThread = curthread;
+
+ uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
+ pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
+
+ pTimer->u.Single.pActiveThread = NULL;
+
+ if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
+ {
+ if ( !pTimer->fIntervalChanged
+ || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
+ return;
+
+ /*
+ * The interval was changed, we need to set the expiration time
+ * ourselves before returning. This comes at a slight cost,
+ * which is why we don't do it all the time.
+ */
+ if (pTimer->u.Single.nsNextTick)
+ pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
+ else
+ pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
+ cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
+ return;
+ }
+
+ /*
+         * The timer has been suspended, set the expiration time to infinity.
+ */
+ }
+ if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
+ cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
+}
+
+
+/**
+ * Callback wrapper for Omni-CPU timers.
+ *
+ * @param pvArg Opaque pointer to the timer.
+ *
+ * @remarks This will be executed in interrupt context but only at the specified
+ *          level, i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
+ *          cyclic subsystem here, and neither should pfnTimer().
+ */
+static void rtTimerSolOmniCallbackWrapper(void *pvArg)
+{
+ PRTTIMER pTimer = (PRTTIMER)pvArg;
+ AssertPtrReturnVoid(pTimer);
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ Assert(pTimer->fAllCpus);
+
+ if (!pTimer->fSuspendedFromTimer)
+ {
+ /*
+ * Perform the callout.
+ */
+ uint32_t const iCpu = CPU->cpu_id;
+
+ pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
+ uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;
+
+ pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
+
+ pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;
+
+ if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
+ {
+ if ( !pTimer->fIntervalChanged
+ || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
+ return;
+
+ /*
+ * The interval was changed, we need to set the expiration time
+ * ourselves before returning. This comes at a slight cost,
+ * which is why we don't do it all the time.
+ *
+ * Note! The cyclic_reprogram call only affects the omni cyclic
+ * component for this CPU.
+ */
+ if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
+ pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
+ else
+ pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
+ cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
+ return;
+ }
+
+ /*
+         * The timer has been suspended, set the expiration time to infinity.
+ */
+ }
+ if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
+ cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
+}
+
+
+/**
+ * Omni-CPU cyclic online event. This is called before the omni cyclic begins
+ * firing on the specified CPU.
+ *
+ * @param pvArg Opaque pointer to the timer.
+ * @param pCpu Pointer to the CPU on which it will fire.
+ * @param pCyclicHandler Pointer to a cyclic handler to add to the CPU
+ * specified in @a pCpu.
+ * @param pCyclicTime Pointer to the cyclic time and interval object.
+ *
+ * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
+ * block (sleep).
+ */
+static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
+{
+ PRTTIMER pTimer = (PRTTIMER)pvArg;
+ AssertPtrReturnVoid(pTimer);
+ AssertPtrReturnVoid(pCpu);
+ AssertPtrReturnVoid(pCyclicHandler);
+ AssertPtrReturnVoid(pCyclicTime);
+ uint32_t const iCpu = pCpu->cpu_id; /* Note! CPU is not necessarily the same as pCpu. */
+
+ pTimer->u.Omni.aPerCpu[iCpu].u64Tick = 0;
+ pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = 0;
+
+ pCyclicHandler->cyh_func = (cyc_func_t)rtTimerSolOmniCallbackWrapper;
+ pCyclicHandler->cyh_arg = pTimer;
+ pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
+
+ uint64_t u64Now = RTTimeSystemNanoTS();
+ if (pTimer->u.Omni.u64When < u64Now)
+ pCyclicTime->cyt_when = u64Now + pTimer->cNsInterval / 2;
+ else
+ pCyclicTime->cyt_when = pTimer->u.Omni.u64When;
+
+ pCyclicTime->cyt_interval = pTimer->cNsInterval;
+}
+
+
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+ RT_ASSERT_PREEMPTIBLE();
+ *ppTimer = NULL;
+
+ /*
+ * Validate flags.
+ */
+ if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
+ return VERR_INVALID_PARAMETER;
+
+ if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+ && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+ && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
+ return VERR_CPU_NOT_FOUND;
+
+ /* One-shot omni timers are not supported by the cyclic system. */
+ if ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
+ && u64NanoInterval == 0)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Allocate and initialize the timer handle. The omni variant has a
+     * variable-sized array of tick counts, hence the size calculation.
+ */
+ PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
+ ? RT_UOFFSETOF_DYN(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
+ : sizeof(RTTIMER));
+ if (!pTimer)
+ return VERR_NO_MEMORY;
+
+ pTimer->u32Magic = RTTIMER_MAGIC;
+ pTimer->cRefs = 1;
+ pTimer->fSuspended = true;
+ pTimer->fSuspendedFromTimer = false;
+ pTimer->fIntervalChanged = false;
+ if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
+ {
+ pTimer->fAllCpus = true;
+ pTimer->fSpecificCpu = false;
+ pTimer->iCpu = UINT32_MAX;
+ }
+ else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+ {
+ pTimer->fAllCpus = false;
+ pTimer->fSpecificCpu = true;
+ pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
+ }
+ else
+ {
+ pTimer->fAllCpus = false;
+ pTimer->fSpecificCpu = false;
+ pTimer->iCpu = UINT32_MAX;
+ }
+ pTimer->cNsInterval = u64NanoInterval;
+ pTimer->pfnTimer = pfnTimer;
+ pTimer->pvUser = pvUser;
+ pTimer->hCyclicId = CYCLIC_NONE;
+
+ *ppTimer = pTimer;
+ return VINF_SUCCESS;
+}
+
+
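+/*
+ * Illustrative usage sketch (hypothetical consumer, not part of this file):
+ * create a 1ms periodic timer not bound to any particular CPU, start it, and
+ * later stop and destroy it. The callback runs at CY_LOCK_LEVEL, so it must
+ * not block or call back into the cyclic subsystem.
+ */
+#if 0
+static DECLCALLBACK(void) exampleTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
+{
+    NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
+    /* ... short, non-blocking work ... */
+}
+
+static int exampleUseTimer(void)
+{
+    PRTTIMER pTimer;
+    int rc = RTTimerCreateEx(&pTimer, RT_NS_1MS /*u64NanoInterval*/, 0 /*fFlags*/,
+                             exampleTimerCallback, NULL /*pvUser*/);
+    if (RT_SUCCESS(rc))
+    {
+        rc = RTTimerStart(pTimer, 0 /*u64First*/);
+        /* ... */
+        RTTimerStop(pTimer);
+        RTTimerDestroy(pTimer);
+    }
+    return rc;
+}
+#endif
+
+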
+/**
+ * Checks if the calling thread is currently executing the timer procedure for
+ * the given timer.
+ *
+ * @returns true if it is, false if it isn't.
+ * @param pTimer The timer in question.
+ */
+DECLINLINE(bool) rtTimerSolIsCallingFromTimerProc(PRTTIMER pTimer)
+{
+ kthread_t *pCurThread = curthread;
+ AssertReturn(pCurThread, false); /* serious paranoia */
+
+ if (!pTimer->fAllCpus)
+ return pTimer->u.Single.pActiveThread == pCurThread;
+ return pTimer->u.Omni.aPerCpu[CPU->cpu_id].pActiveThread == pCurThread;
+}
+
+
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+ if (pTimer == NULL)
+ return VINF_SUCCESS;
+ RTTIMER_ASSERT_VALID_RET(pTimer);
+ RT_ASSERT_INTS_ON();
+
+ /*
+     * It is not possible to destroy a timer from its callback function.
+ * Cyclic makes that impossible (or at least extremely risky).
+ */
+ AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);
+
+ /*
+ * Invalidate the handle, make sure it's stopped and free the associated resources.
+ */
+ ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
+
+ if ( !pTimer->fSuspended
+ || pTimer->hCyclicId != CYCLIC_NONE) /* 2nd check shouldn't happen */
+ rtTimerSolStopIt(pTimer);
+
+ rtTimerSolRelease(pTimer);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
+{
+ RTTIMER_ASSERT_VALID_RET(pTimer);
+ RT_ASSERT_INTS_ON();
+
+ /*
+     * It's not possible to restart a one-shot timer from its callback function,
+ * at least not at the moment.
+ */
+ AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);
+
+ mutex_enter(&cpu_lock);
+
+ /*
+ * Make sure it's not active already. If it was suspended from a timer
+ * callback function, we need to do some cleanup work here before we can
+ * restart the timer.
+ */
+ if (!pTimer->fSuspended)
+ {
+ if (!pTimer->fSuspendedFromTimer)
+ {
+ mutex_exit(&cpu_lock);
+ return VERR_TIMER_ACTIVE;
+ }
+ cyclic_remove(pTimer->hCyclicId);
+ pTimer->hCyclicId = CYCLIC_NONE;
+ }
+
+ pTimer->fSuspended = false;
+ pTimer->fSuspendedFromTimer = false;
+ pTimer->fIntervalChanged = false;
+ if (pTimer->fAllCpus)
+ {
+ /*
+         * Set up the omni (all-CPU) timer. The omni-CPU online event will fire
+         * and from there we set up periodic timers per CPU.
+ */
+ pTimer->u.Omni.u64When = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);
+
+ cyc_omni_handler_t HandlerOmni;
+ HandlerOmni.cyo_online = rtTimerSolOmniCpuOnline;
+ HandlerOmni.cyo_offline = NULL;
+ HandlerOmni.cyo_arg = pTimer;
+
+ pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
+ }
+ else
+ {
+ cyc_handler_t Handler;
+ cyc_time_t FireTime;
+
+ /*
+         * Set up a single-CPU timer. If a specific CPU was requested, it
+ * must be online or the timer cannot start.
+ */
+ if ( pTimer->fSpecificCpu
+ && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
+ {
+ pTimer->fSuspended = true;
+
+ mutex_exit(&cpu_lock);
+ return VERR_CPU_OFFLINE;
+ }
+
+ Handler.cyh_func = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
+ Handler.cyh_arg = pTimer;
+ Handler.cyh_level = CY_LOCK_LEVEL;
+
+ /*
+ * Use a large interval (1 hour) so that we don't get a timer-callback between
+ * cyclic_add() and cyclic_bind(). Program the correct interval once cyclic_bind() is done.
+ * See @bugref{7691#c20}.
+ */
+ if (!pTimer->fSpecificCpu)
+ FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
+ else
+ FireTime.cyt_when = RTTimeSystemNanoTS() + u64First + RT_NS_1HOUR;
+ FireTime.cyt_interval = pTimer->cNsInterval != 0
+ ? pTimer->cNsInterval
+ : CY_INFINITY /* Special value, see cyclic_fire(). */;
+ pTimer->u.Single.u64Tick = 0;
+ pTimer->u.Single.nsNextTick = 0;
+
+ pTimer->hCyclicId = cyclic_add(&Handler, &FireTime);
+ if (pTimer->fSpecificCpu)
+ {
+ cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
+ cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
+ }
+ }
+
+ mutex_exit(&cpu_lock);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker common for RTTimerStop and RTTimerDestroy.
+ *
+ * @param pTimer The timer to stop.
+ */
+static void rtTimerSolStopIt(PRTTIMER pTimer)
+{
+ mutex_enter(&cpu_lock);
+
+ pTimer->fSuspended = true;
+ if (pTimer->hCyclicId != CYCLIC_NONE)
+ {
+ cyclic_remove(pTimer->hCyclicId);
+ pTimer->hCyclicId = CYCLIC_NONE;
+ }
+ pTimer->fSuspendedFromTimer = false;
+
+ mutex_exit(&cpu_lock);
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+ RTTIMER_ASSERT_VALID_RET(pTimer);
+ RT_ASSERT_INTS_ON();
+
+ if (pTimer->fSuspended)
+ return VERR_TIMER_SUSPENDED;
+
+    /* Taking cpu_lock and calling cyclic_remove here may deadlock
+       the system, so just mark the timer as suspended and deal with it in
+       the callback wrapper function above. */
+ if (rtTimerSolIsCallingFromTimerProc(pTimer))
+ pTimer->fSuspendedFromTimer = true;
+ else
+ rtTimerSolStopIt(pTimer);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
+{
+ /*
+ * Validate.
+ */
+ RTTIMER_ASSERT_VALID_RET(pTimer);
+ AssertReturn(u64NanoInterval > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
+ AssertReturn(pTimer->cNsInterval, VERR_INVALID_STATE);
+
+ if (pTimer->fSuspended || pTimer->fSuspendedFromTimer)
+ pTimer->cNsInterval = u64NanoInterval;
+ else
+ {
+ ASMAtomicWriteU64(&pTimer->cNsInterval, u64NanoInterval);
+ ASMAtomicWriteBool(&pTimer->fIntervalChanged, true);
+
+ if ( !pTimer->fAllCpus
+ && !pTimer->u.Single.nsNextTick
+ && pTimer->hCyclicId != CYCLIC_NONE
+ && rtTimerSolIsCallingFromTimerProc(pTimer))
+ pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS();
+ }
+
+ return VINF_SUCCESS;
+}
+
+
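+/*
+ * Illustrative sketch (hypothetical callback, not part of this file):
+ * RTTimerChangeInterval may be called from the timer callback itself; the
+ * fIntervalChanged flag then makes the callback wrapper reprogram the cyclic
+ * with the new interval on the next expiry.
+ */
+#if 0
+static DECLCALLBACK(void) exampleAdaptiveTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
+{
+    NOREF(pvUser);
+    if ((iTick % 100) == 0)     /* every 100 ticks, slow the timer down (values are arbitrary) */
+        RTTimerChangeInterval(pTimer, 2 * RT_NS_1MS);
+}
+#endif
+
+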
+RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
+{
+ return nsec_per_tick;
+}
+
+
+RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
+{
+ return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
+{
+ return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(bool) RTTimerCanDoHighResolution(void)
+{
+ return true;
+}
+