summaryrefslogtreecommitdiffstats
path: root/src/VBox/Runtime/r0drv
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/VBox/Runtime/r0drv/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/RTR0DbgKrnlInfoGetSymbol.cpp46
-rw-r--r--src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp957
-rw-r--r--src/VBox/Runtime/r0drv/alloc-r0drv.cpp438
-rw-r--r--src/VBox/Runtime/r0drv/alloc-r0drv.h104
-rw-r--r--src/VBox/Runtime/r0drv/darwin/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/darwin/RTLogWriteDebugger-r0drv-darwin.cpp42
-rw-r--r--src/VBox/Runtime/r0drv/darwin/RTLogWriteStdOut-r0drv-darwin.cpp42
-rw-r--r--src/VBox/Runtime/r0drv/darwin/alloc-r0drv-darwin.cpp191
-rw-r--r--src/VBox/Runtime/r0drv/darwin/assert-r0drv-darwin.cpp72
-rw-r--r--src/VBox/Runtime/r0drv/darwin/dbgkrnlinfo-r0drv-darwin.cpp1083
-rw-r--r--src/VBox/Runtime/r0drv/darwin/fileio-r0drv-darwin.cpp311
-rw-r--r--src/VBox/Runtime/r0drv/darwin/initterm-r0drv-darwin.cpp115
-rw-r--r--src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp1244
-rw-r--r--src/VBox/Runtime/r0drv/darwin/memuserkernel-r0drv-darwin.cpp118
-rw-r--r--src/VBox/Runtime/r0drv/darwin/mp-r0drv-darwin.cpp314
-rw-r--r--src/VBox/Runtime/r0drv/darwin/process-r0drv-darwin.cpp46
-rw-r--r--src/VBox/Runtime/r0drv/darwin/rtStrFormatKernelAddress-r0drv-darwin.cpp50
-rw-r--r--src/VBox/Runtime/r0drv/darwin/semevent-r0drv-darwin.cpp427
-rw-r--r--src/VBox/Runtime/r0drv/darwin/semeventmulti-r0drv-darwin.cpp449
-rw-r--r--src/VBox/Runtime/r0drv/darwin/semfastmutex-r0drv-darwin.cpp140
-rw-r--r--src/VBox/Runtime/r0drv/darwin/semmutex-r0drv-darwin.cpp407
-rw-r--r--src/VBox/Runtime/r0drv/darwin/spinlock-r0drv-darwin.cpp177
-rw-r--r--src/VBox/Runtime/r0drv/darwin/the-darwin-kernel.h240
-rw-r--r--src/VBox/Runtime/r0drv/darwin/thread-r0drv-darwin.cpp82
-rw-r--r--src/VBox/Runtime/r0drv/darwin/thread2-r0drv-darwin.cpp192
-rw-r--r--src/VBox/Runtime/r0drv/darwin/threadpreempt-r0drv-darwin.cpp203
-rw-r--r--src/VBox/Runtime/r0drv/darwin/time-r0drv-darwin.cpp98
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/alloc-r0drv-freebsd.c185
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/assert-r0drv-freebsd.c70
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/initterm-r0drv-freebsd.c53
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c905
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/memuserkernel-r0drv-freebsd.c83
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/mp-r0drv-freebsd.c308
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/process-r0drv-freebsd.c51
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/semevent-r0drv-freebsd.c256
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/semeventmulti-r0drv-freebsd.c320
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/semfastmutex-r0drv-freebsd.c115
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/semmutex-r0drv-freebsd.c219
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/sleepqueue-r0drv-freebsd.h334
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/spinlock-r0drv-freebsd.c210
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/the-freebsd-kernel.h122
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/thread-r0drv-freebsd.c186
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/thread2-r0drv-freebsd.c155
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/time-r0drv-freebsd.c74
-rw-r--r--src/VBox/Runtime/r0drv/freebsd/timer-r0drv-freebsd.c286
-rw-r--r--src/VBox/Runtime/r0drv/generic/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTMpIsCpuWorkPending-r0drv-generic.cpp45
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTMpOn-r0drv-generic.cpp94
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTMpPokeCpu-r0drv-generic.cpp48
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTThreadPreemptDisable-r0drv-generic.cpp44
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsEnabled-r0drv-generic.cpp43
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPending-r0drv-generic.cpp43
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPendingTrusty-r0drv-generic.cpp41
-rw-r--r--src/VBox/Runtime/r0drv/generic/RTThreadPreemptRestore-r0drv-generic.cpp44
-rw-r--r--src/VBox/Runtime/r0drv/generic/mpnotification-r0drv-generic.cpp65
-rw-r--r--src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c503
-rw-r--r--src/VBox/Runtime/r0drv/generic/threadctxhooks-r0drv-generic.cpp73
-rw-r--r--src/VBox/Runtime/r0drv/haiku/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/haiku/RTLogWriteDebugger-r0drv-haiku.c42
-rw-r--r--src/VBox/Runtime/r0drv/haiku/RTLogWriteStdOut-r0drv-haiku.c41
-rw-r--r--src/VBox/Runtime/r0drv/haiku/alloc-r0drv-haiku.c124
-rw-r--r--src/VBox/Runtime/r0drv/haiku/assert-r0drv-haiku.c68
-rw-r--r--src/VBox/Runtime/r0drv/haiku/initterm-r0drv-haiku.c48
-rw-r--r--src/VBox/Runtime/r0drv/haiku/memobj-r0drv-haiku.c664
-rw-r--r--src/VBox/Runtime/r0drv/haiku/mp-r0drv-haiku.c236
-rw-r--r--src/VBox/Runtime/r0drv/haiku/process-r0drv-haiku.c46
-rw-r--r--src/VBox/Runtime/r0drv/haiku/semevent-r0drv-haiku.c264
-rw-r--r--src/VBox/Runtime/r0drv/haiku/semeventmulti-r0drv-haiku.c292
-rw-r--r--src/VBox/Runtime/r0drv/haiku/semfastmutex-r0drv-haiku.c120
-rw-r--r--src/VBox/Runtime/r0drv/haiku/semmutex-r0drv-haiku.c233
-rw-r--r--src/VBox/Runtime/r0drv/haiku/spinlock-r0drv-haiku.c138
-rw-r--r--src/VBox/Runtime/r0drv/haiku/the-haiku-kernel.h116
-rw-r--r--src/VBox/Runtime/r0drv/haiku/thread-r0drv-haiku.c127
-rw-r--r--src/VBox/Runtime/r0drv/haiku/thread2-r0drv-haiku.c138
-rw-r--r--src/VBox/Runtime/r0drv/haiku/time-r0drv-haiku.c79
-rw-r--r--src/VBox/Runtime/r0drv/initterm-r0drv.cpp164
-rw-r--r--src/VBox/Runtime/r0drv/linux/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/linux/RTLogWriteDebugger-r0drv-linux.c43
-rw-r--r--src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c501
-rw-r--r--src/VBox/Runtime/r0drv/linux/assert-r0drv-linux.c74
-rw-r--r--src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c137
-rw-r--r--src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c1768
-rw-r--r--src/VBox/Runtime/r0drv/linux/memuserkernel-r0drv-linux.c181
-rw-r--r--src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c626
-rw-r--r--src/VBox/Runtime/r0drv/linux/mpnotification-r0drv-linux.c248
-rw-r--r--src/VBox/Runtime/r0drv/linux/process-r0drv-linux.c49
-rw-r--r--src/VBox/Runtime/r0drv/linux/rtStrFormatKernelAddress-r0drv-linux.c56
-rw-r--r--src/VBox/Runtime/r0drv/linux/semevent-r0drv-linux.c279
-rw-r--r--src/VBox/Runtime/r0drv/linux/semeventmulti-r0drv-linux.c344
-rw-r--r--src/VBox/Runtime/r0drv/linux/semfastmutex-r0drv-linux.c157
-rw-r--r--src/VBox/Runtime/r0drv/linux/semmutex-r0drv-linux.c421
-rw-r--r--src/VBox/Runtime/r0drv/linux/spinlock-r0drv-linux.c186
-rw-r--r--src/VBox/Runtime/r0drv/linux/string.h60
-rw-r--r--src/VBox/Runtime/r0drv/linux/the-linux-kernel.h461
-rw-r--r--src/VBox/Runtime/r0drv/linux/thread-r0drv-linux.c234
-rw-r--r--src/VBox/Runtime/r0drv/linux/thread2-r0drv-linux.c162
-rw-r--r--src/VBox/Runtime/r0drv/linux/threadctxhooks-r0drv-linux.c330
-rw-r--r--src/VBox/Runtime/r0drv/linux/time-r0drv-linux.c196
-rw-r--r--src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c1693
-rw-r--r--src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h292
-rw-r--r--src/VBox/Runtime/r0drv/memobj-r0drv.cpp808
-rw-r--r--src/VBox/Runtime/r0drv/mp-r0drv.h85
-rw-r--r--src/VBox/Runtime/r0drv/mpnotification-r0drv.c322
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/RTLogWriteStdOut-r0drv-netbsd.c39
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/alloc-r0drv-netbsd.c165
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/assert-r0drv-netbsd.c63
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/initterm-r0drv-netbsd.c52
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c558
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/memuserkernel-r0drv-netbsd.c82
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/mp-r0drv-netbsd.c43
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/process-r0drv-netbsd.c51
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/semevent-r0drv-netbsd.c255
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/semeventmulti-r0drv-netbsd.c319
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/semfastmutex-r0drv-netbsd.c114
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/semmutex-r0drv-netbsd.c218
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/sleepqueue-r0drv-netbsd.h281
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/spinlock-r0drv-netbsd.c148
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/the-netbsd-kernel.h75
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/thread-r0drv-netbsd.c181
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/thread2-r0drv-netbsd.c135
-rw-r--r--src/VBox/Runtime/r0drv/netbsd/time-r0drv-netbsd.c73
-rw-r--r--src/VBox/Runtime/r0drv/nt/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp39
-rw-r--r--src/VBox/Runtime/r0drv/nt/RTTimerGetSystemGranularity-r0drv-nt.cpp61
-rw-r--r--src/VBox/Runtime/r0drv/nt/alloc-r0drv-nt.cpp151
-rw-r--r--src/VBox/Runtime/r0drv/nt/assert-r0drv-nt.cpp66
-rw-r--r--src/VBox/Runtime/r0drv/nt/dbgkrnlinfo-r0drv-nt.cpp761
-rw-r--r--src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp507
-rw-r--r--src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h139
-rw-r--r--src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp1010
-rw-r--r--src/VBox/Runtime/r0drv/nt/memuserkernel-r0drv-nt.cpp123
-rw-r--r--src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp1952
-rw-r--r--src/VBox/Runtime/r0drv/nt/nt3fakes-r0drv-nt.cpp813
-rw-r--r--src/VBox/Runtime/r0drv/nt/nt3fakes-stub-r0drv-nt.cpp42
-rw-r--r--src/VBox/Runtime/r0drv/nt/nt3fakesA-r0drv-nt.asm147
-rw-r--r--src/VBox/Runtime/r0drv/nt/ntBldSymDb.cpp1212
-rw-r--r--src/VBox/Runtime/r0drv/nt/process-r0drv-nt.cpp45
-rw-r--r--src/VBox/Runtime/r0drv/nt/semevent-r0drv-nt.cpp277
-rw-r--r--src/VBox/Runtime/r0drv/nt/semeventmulti-r0drv-nt.cpp300
-rw-r--r--src/VBox/Runtime/r0drv/nt/semfastmutex-r0drv-nt.cpp138
-rw-r--r--src/VBox/Runtime/r0drv/nt/semmutex-r0drv-nt.cpp236
-rw-r--r--src/VBox/Runtime/r0drv/nt/spinlock-r0drv-nt.cpp197
-rw-r--r--src/VBox/Runtime/r0drv/nt/symdb.h88
-rw-r--r--src/VBox/Runtime/r0drv/nt/symdbdata.h2988
-rw-r--r--src/VBox/Runtime/r0drv/nt/the-nt-kernel.h89
-rw-r--r--src/VBox/Runtime/r0drv/nt/thread-r0drv-nt.cpp228
-rw-r--r--src/VBox/Runtime/r0drv/nt/thread2-r0drv-nt.cpp157
-rw-r--r--src/VBox/Runtime/r0drv/nt/time-r0drv-nt.cpp149
-rw-r--r--src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp597
-rw-r--r--src/VBox/Runtime/r0drv/nt/toxic-chkstk-r0drv-nt.asm42
-rw-r--r--src/VBox/Runtime/r0drv/os2/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/os2/RTR0AssertPanicSystem-r0drv-os2.asm104
-rw-r--r--src/VBox/Runtime/r0drv/os2/RTR0Os2DHQueryDOSVar.asm183
-rw-r--r--src/VBox/Runtime/r0drv/os2/RTR0Os2DHVMGlobalToProcess.asm114
-rw-r--r--src/VBox/Runtime/r0drv/os2/alloc-r0drv-os2.cpp107
-rw-r--r--src/VBox/Runtime/r0drv/os2/assert-r0drv-os2.cpp134
-rw-r--r--src/VBox/Runtime/r0drv/os2/assertA-r0drv-os2.asm47
-rw-r--r--src/VBox/Runtime/r0drv/os2/initterm-r0drv-os2.cpp98
-rw-r--r--src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp572
-rw-r--r--src/VBox/Runtime/r0drv/os2/memuserkernel-r0drv-os2.cpp91
-rw-r--r--src/VBox/Runtime/r0drv/os2/os2imports.imp121
-rw-r--r--src/VBox/Runtime/r0drv/os2/process-r0drv-os2.cpp54
-rw-r--r--src/VBox/Runtime/r0drv/os2/semevent-r0drv-os2.cpp271
-rw-r--r--src/VBox/Runtime/r0drv/os2/semeventmulti-r0drv-os2.cpp281
-rw-r--r--src/VBox/Runtime/r0drv/os2/semfastmutex-r0drv-os2.cpp115
-rw-r--r--src/VBox/Runtime/r0drv/os2/spinlock-r0drv-os2.cpp131
-rw-r--r--src/VBox/Runtime/r0drv/os2/the-os2-kernel.h59
-rw-r--r--src/VBox/Runtime/r0drv/os2/thread-r0drv-os2.cpp191
-rw-r--r--src/VBox/Runtime/r0drv/os2/thread2-r0drv-os2.cpp86
-rw-r--r--src/VBox/Runtime/r0drv/os2/time-r0drv-os2.cpp92
-rw-r--r--src/VBox/Runtime/r0drv/os2/timer-r0drv-os2.cpp385
-rw-r--r--src/VBox/Runtime/r0drv/os2/timerA-r0drv-os2.asm218
-rw-r--r--src/VBox/Runtime/r0drv/power-r0drv.h44
-rw-r--r--src/VBox/Runtime/r0drv/powernotification-r0drv.c318
-rw-r--r--src/VBox/Runtime/r0drv/solaris/Makefile.kup0
-rw-r--r--src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c66
-rw-r--r--src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c50
-rw-r--r--src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c206
-rw-r--r--src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c77
-rw-r--r--src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c339
-rw-r--r--src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c282
-rw-r--r--src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c1166
-rw-r--r--src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h324
-rw-r--r--src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c100
-rw-r--r--src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c79
-rw-r--r--src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c450
-rw-r--r--src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c139
-rw-r--r--src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c49
-rw-r--r--src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c347
-rw-r--r--src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c355
-rw-r--r--src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h496
-rw-r--r--src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c120
-rw-r--r--src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c387
-rw-r--r--src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c204
-rw-r--r--src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h216
-rw-r--r--src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c185
-rw-r--r--src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c150
-rw-r--r--src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c349
-rw-r--r--src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c70
-rw-r--r--src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c650
203 files changed, 51294 insertions, 0 deletions
diff --git a/src/VBox/Runtime/r0drv/Makefile.kup b/src/VBox/Runtime/r0drv/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/RTR0DbgKrnlInfoGetSymbol.cpp b/src/VBox/Runtime/r0drv/RTR0DbgKrnlInfoGetSymbol.cpp
new file mode 100644
index 00000000..9999032c
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/RTR0DbgKrnlInfoGetSymbol.cpp
@@ -0,0 +1,46 @@
+/* $Id: RTR0DbgKrnlInfoGetSymbol.cpp $ */
+/** @file
+ * IPRT - RTR0DbgKrnlInfoGetSymbol, ring-0 drivers.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "internal/iprt.h"
+#include <iprt/dbg.h>
+
+#include <iprt/errcore.h>
+
+
+RTR0DECL(void *) RTR0DbgKrnlInfoGetSymbol(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszSymbol)
+{
+ void *pvSymbol = NULL;
+ int rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, pszModule, pszSymbol, &pvSymbol);
+ if (RT_SUCCESS(rc))
+ return pvSymbol;
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTR0DbgKrnlInfoGetSymbol);
+
diff --git a/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp b/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp
new file mode 100644
index 00000000..cf3037eb
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp
@@ -0,0 +1,957 @@
+/* $Id: alloc-ef-r0drv.cpp $ */
+/** @file
+ * IPRT - Memory Allocation, electric fence for ring-0 drivers.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTMEM_NO_WRAP_TO_EF_APIS
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/log.h>
+#include <iprt/memobj.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+#include "internal/mem.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#if defined(DOXYGEN_RUNNING)
+# define RTR0MEM_EF_IN_FRONT
+#endif
+
+/** @def RTR0MEM_EF_SIZE
+ * The size of the fence. This must be page aligned.
+ */
+#define RTR0MEM_EF_SIZE PAGE_SIZE
+
+/** @def RTR0MEM_EF_ALIGNMENT
+ * The allocation alignment, power of two of course.
+ *
+ * Use this for working around misaligned sizes, usually stemming from
+ * allocating a string or something after the main structure. When you
+ * encounter this, please fix the allocation to RTMemAllocVar or RTMemAllocZVar.
+ */
+#if 0
+# define RTR0MEM_EF_ALIGNMENT (ARCH_BITS / 8)
+#else
+# define RTR0MEM_EF_ALIGNMENT 1
+#endif
+
+/** @def RTR0MEM_EF_IN_FRONT
+ * Define this to put the fence up in front of the block.
+ * The default (when this isn't defined) is to put it up after the block.
+ */
+//# define RTR0MEM_EF_IN_FRONT
+
+/** @def RTR0MEM_EF_FREE_DELAYED
+ * This define will enable free() delay and protection of the freed data
+ * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
+ * the threshold of the delayed blocks.
+ * Delayed blocks does not consume any physical memory, only virtual address space.
+ */
+#define RTR0MEM_EF_FREE_DELAYED (20 * _1M)
+
+/** @def RTR0MEM_EF_FREE_FILL
+ * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
+ * in the block before freeing/decommitting it. This is useful in GDB since GDB
+ * appears to be able to read the content of the page even after it's been
+ * decommitted.
+ */
+#define RTR0MEM_EF_FREE_FILL 'f'
+
+/** @def RTR0MEM_EF_FILLER
+ * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
+ * memory when the API doesn't require it to be zero'd.
+ */
+#define RTR0MEM_EF_FILLER 0xef
+
+/** @def RTR0MEM_EF_NOMAN_FILLER
+ * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
+ * unprotected but not allocated area of memory, the so called no man's land.
+ */
+#define RTR0MEM_EF_NOMAN_FILLER 0xaa
+
+/** @def RTR0MEM_EF_FENCE_FILLER
+ * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
+ * fence itself, as debuggers can usually read them.
+ */
+#define RTR0MEM_EF_FENCE_FILLER 0xcc
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#ifdef RT_OS_WINDOWS
+# include <iprt/win/windows.h>
+#elif !defined(RT_OS_FREEBSD)
+# include <sys/mman.h>
+#endif
+#include <iprt/avl.h>
+#include <iprt/thread.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Allocation types.
+ */
+typedef enum RTMEMTYPE
+{
+ RTMEMTYPE_RTMEMALLOC,
+ RTMEMTYPE_RTMEMALLOCZ,
+ RTMEMTYPE_RTMEMREALLOC,
+ RTMEMTYPE_RTMEMFREE,
+
+ RTMEMTYPE_NEW,
+ RTMEMTYPE_NEW_ARRAY,
+ RTMEMTYPE_DELETE,
+ RTMEMTYPE_DELETE_ARRAY
+} RTMEMTYPE;
+
+/**
+ * Node tracking a memory allocation.
+ */
+typedef struct RTR0MEMEFBLOCK
+{
+ /** Avl node code, key is the user block pointer. */
+ AVLPVNODECORE Core;
+ /** Allocation type. */
+ RTMEMTYPE enmType;
+ /** The memory object. */
+ RTR0MEMOBJ hMemObj;
+ /** The unaligned size of the block. */
+ size_t cbUnaligned;
+ /** The aligned size of the block. */
+ size_t cbAligned;
+ /** The allocation tag (read-only string). */
+ const char *pszTag;
+ /** The return address of the allocator function. */
+ void *pvCaller;
+ /** Line number of the alloc call. */
+ unsigned iLine;
+ /** File from which the allocation was made. */
+ const char *pszFile;
+ /** Function from which the allocation was made. */
+ const char *pszFunction;
+} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;
+
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Spinlock protecting the all the block's globals. */
+static volatile uint32_t g_BlocksLock;
+/** Tree tracking the allocations. */
+static AVLPVTREE g_BlocksTree;
+
+#ifdef RTR0MEM_EF_FREE_DELAYED
+/** Head of the delayed blocks. */
+static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
+/** Tail of the delayed blocks. */
+static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
+/** Number of bytes in the delay list (includes fences). */
+static volatile size_t g_cbBlocksDelay;
+#endif /* RTR0MEM_EF_FREE_DELAYED */
+
+/** Array of pointers to watch for in free operations. */
+void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
+/** Enable logging of all freed memory. */
+bool gfRTMemFreeLog = false;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+
+
+/**
+ * @callback_method_impl{FNRTSTROUTPUT}
+ */
+static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
+{
+ RT_NOREF1(pvArg);
+ if (cbChars)
+ {
+ RTLogWriteDebugger(pachChars, cbChars);
+ RTLogWriteStdOut(pachChars, cbChars);
+ RTLogWriteUser(pachChars, cbChars);
+ }
+ return cbChars;
+}
+
+
+/**
+ * Complains about something.
+ */
+static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
+{
+ va_list args;
+ RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
+ va_start(args, pszFormat);
+ RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
+ va_end(args);
+ RTAssertDoPanic();
+}
+
+/**
+ * Log an event.
+ */
+DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
+{
+#if 0
+ va_list args;
+ RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
+ va_start(args, pszFormat);
+ RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
+ va_end(args);
+#else
+ NOREF(pszOp); NOREF(pszFormat);
+#endif
+}
+
+
+
+/**
+ * Acquires the lock.
+ */
+DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
+{
+ RTCCUINTREG uRet;
+ unsigned c = 0;
+ if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+ {
+ for (;;)
+ {
+ uRet = ASMIntDisableFlags();
+ if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
+ break;
+ ASMSetFlags(uRet);
+ RTThreadSleepNoLog(((++c) >> 2) & 31);
+ }
+ }
+ else
+ {
+ for (;;)
+ {
+ uRet = ASMIntDisableFlags();
+ if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
+ break;
+ ASMSetFlags(uRet);
+ ASMNopPause();
+ if (++c & 3)
+ ASMNopPause();
+ }
+ }
+ return uRet;
+}
+
+
+/**
+ * Releases the lock.
+ */
+DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
+{
+ Assert(g_BlocksLock == 1);
+ ASMAtomicXchgU32(&g_BlocksLock, 0);
+ ASMSetFlags(fSavedIntFlags);
+}
+
+
+/**
+ * Creates a block.
+ */
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
+ const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
+{
+ PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
+ if (pBlock)
+ {
+ pBlock->enmType = enmType;
+ pBlock->cbUnaligned = cbUnaligned;
+ pBlock->cbAligned = cbAligned;
+ pBlock->pszTag = pszTag;
+ pBlock->pvCaller = pvCaller;
+ pBlock->iLine = iLine;
+ pBlock->pszFile = pszFile;
+ pBlock->pszFunction = pszFunction;
+ }
+ return pBlock;
+}
+
+
+/**
+ * Frees a block.
+ */
+DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
+{
+ RTMemFree(pBlock);
+}
+
+
+/**
+ * Insert a block into the tree.
+ */
+DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
+{
+ pBlock->Core.Key = pv;
+ pBlock->hMemObj = hMemObj;
+ RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+ bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
+ rtR0MemBlockUnlock(fSavedIntFlags);
+ AssertRelease(fRc);
+}
+
+
+/**
+ * Remove a block from the tree and returns it to the caller.
+ */
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
+{
+ RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+ PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
+ rtR0MemBlockUnlock(fSavedIntFlags);
+ return pBlock;
+}
+
+
+/**
+ * Gets a block.
+ */
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
+{
+ RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+ PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
+ rtR0MemBlockUnlock(fSavedIntFlags);
+ return pBlock;
+}
+
+
+/**
+ * Dumps one allocation.
+ */
+static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
+{
+ PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
+ RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
+ pBlock->Core.Key,
+ (unsigned long)pBlock->cbUnaligned,
+ (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
+ pBlock->pvCaller);
+ NOREF(pvUser);
+ return 0;
+}
+
+
+/**
+ * Dumps the allocated blocks.
+ * This is something which you should call from gdb.
+ */
+RT_C_DECLS_BEGIN
+void RTMemDump(void);
+RT_C_DECLS_END
+
+void RTMemDump(void)
+{
+ RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address size(alg) caller\n");
+ RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
+}
+
+#ifdef RTR0MEM_EF_FREE_DELAYED
+
+/**
+ * Insert a delayed block.
+ */
+DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
+{
+ size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
+ pBlock->Core.pRight = NULL;
+ pBlock->Core.pLeft = NULL;
+ RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+ if (g_pBlocksDelayHead)
+ {
+ g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
+ pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
+ g_pBlocksDelayHead = pBlock;
+ }
+ else
+ {
+ g_pBlocksDelayTail = pBlock;
+ g_pBlocksDelayHead = pBlock;
+ }
+ g_cbBlocksDelay += cbBlock;
+ rtR0MemBlockUnlock(fSavedIntFlags);
+}
+
+/**
+ * Removes a delayed block.
+ */
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
+{
+ PRTR0MEMEFBLOCK pBlock = NULL;
+ RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+ if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
+ {
+ pBlock = g_pBlocksDelayTail;
+ if (pBlock)
+ {
+ g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
+ if (pBlock->Core.pLeft)
+ pBlock->Core.pLeft->pRight = NULL;
+ else
+ g_pBlocksDelayHead = NULL;
+ g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
+ }
+ }
+ rtR0MemBlockUnlock(fSavedIntFlags);
+ return pBlock;
+}
+
+#endif /* RTR0MEM_EF_FREE_DELAYED */
+
+
+static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
+{
+ void *pv = pBlock->Core.Key;
+# ifdef RTR0MEM_EF_IN_FRONT
+ void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
+# else
+ void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
+# endif
+ size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
+
+ int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, RT_ALIGN_Z(cbBlock, PAGE_SIZE), RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+ if (RT_FAILURE(rc))
+ rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
+ pvBlock, cbBlock, rc);
+
+ rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
+ if (RT_FAILURE(rc))
+ rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
+ pBlock->hMemObj = NIL_RTR0MEMOBJ;
+
+ rtR0MemBlockFree(pBlock);
+}
+
+
+/**
+ * Initialize call, we shouldn't fail here.
+ */
+void rtR0MemEfInit(void)
+{
+
+}
+
+/**
+ * @callback_method_impl{AVLPVCALLBACK}
+ */
+static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
+{
+ PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
+
+ /* Note! pszFile and pszFunction may be invalid at this point. */
+ rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
+ pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);
+
+ rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");
+
+ NOREF(pvUser);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Termination call.
+ *
+ * Will check and free memory.
+ */
+void rtR0MemEfTerm(void)
+{
+#ifdef RTR0MEM_EF_FREE_DELAYED
+ /*
+ * Release delayed frees.
+ */
+ RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+ for (;;)
+ {
+ PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
+ if (pBlock)
+ {
+ g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
+ if (pBlock->Core.pLeft)
+ pBlock->Core.pLeft->pRight = NULL;
+ else
+ g_pBlocksDelayHead = NULL;
+ rtR0MemBlockUnlock(fSavedIntFlags);
+
+ rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");
+
+ rtR0MemBlockLock();
+ }
+ else
+ break;
+ }
+ g_cbBlocksDelay = 0;
+ rtR0MemBlockUnlock(fSavedIntFlags);
+#endif
+
+ /*
+ * Complain about leaks. Then release them.
+ */
+ RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
+}
+
+
+/**
+ * Internal allocator.
+ */
+static void * rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
+ const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
+{
+ /*
+ * Sanity.
+ */
+ if ( RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
+ && RTR0MEM_EF_SIZE <= 0)
+ {
+ rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
+ return NULL;
+ }
+ if (!cbUnaligned)
+ {
+#if 1
+ rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
+ return NULL;
+#else
+ cbAligned = cbUnaligned = 1;
+#endif
+ }
+
+#ifndef RTR0MEM_EF_IN_FRONT
+ /* Alignment decreases fence accuracy, but this is at least partially
+ * counteracted by filling and checking the alignment padding. When the
+ * fence is in front then no extra alignment is needed. */
+ cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
+#endif
+
+ /*
+ * Allocate the trace block.
+ */
+ PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
+ if (!pBlock)
+ {
+ rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
+ return NULL;
+ }
+
+ /*
+ * Allocate a block with page alignment space + the size of the E-fence.
+ */
+ void *pvBlock = NULL;
+ RTR0MEMOBJ hMemObj;
+ size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
+ int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
+ if (RT_SUCCESS(rc))
+ pvBlock = RTR0MemObjAddress(hMemObj);
+ if (pvBlock)
+ {
+ /*
+ * Calc the start of the fence and the user block
+ * and then change the page protection of the fence.
+ */
+#ifdef RTR0MEM_EF_IN_FRONT
+ void *pvEFence = pvBlock;
+ void *pv = (char *)pvEFence + RTR0MEM_EF_SIZE;
+# ifdef RTR0MEM_EF_NOMAN_FILLER
+ memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
+# endif
+#else
+ void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
+ void *pv = (char *)pvEFence - cbAligned;
+# ifdef RTR0MEM_EF_NOMAN_FILLER
+ memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
+ memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
+# endif
+#endif
+
+#ifdef RTR0MEM_EF_FENCE_FILLER
+ memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
+#endif
+ rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
+ if (!rc)
+ {
+ rtR0MemBlockInsert(pBlock, pv, hMemObj);
+ if (enmType == RTMEMTYPE_RTMEMALLOCZ)
+ memset(pv, 0, cbUnaligned);
+#ifdef RTR0MEM_EF_FILLER
+ else
+ memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
+#endif
+
+ rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
+ return pv;
+ }
+ rtR0MemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
+ RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
+ }
+ else
+ {
+ rtR0MemComplain(pszOp, "Failed to allocated %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
+ if (RT_SUCCESS(rc))
+ RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
+ }
+
+ rtR0MemBlockFree(pBlock);
+ return NULL;
+}
+
+
+/**
+ * Internal free.
+ *
+ * Looks up the tracking block for @a pv, checks the watch points and the
+ * no-man's-land padding, then either frees the block immediately or (with
+ * RTR0MEM_EF_FREE_DELAYED) protects all its pages and parks it on the
+ * delay list.
+ */
+static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
+{
+ NOREF(enmType); RT_SRC_POS_NOREF();
+
+ /*
+ * Simple case.
+ */
+ if (!pv)
+ return;
+
+ /*
+ * Check watch points.
+ */
+ for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
+ if (gapvRTMemFreeWatch[i] == pv)
+ RTAssertDoPanic();
+
+ /*
+ * Find the block.
+ */
+ PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
+ if (pBlock)
+ {
+ if (gfRTMemFreeLog)
+ RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
+
+#ifdef RTR0MEM_EF_NOMAN_FILLER
+ /*
+ * Check whether the no man's land is untouched.
+ */
+# ifdef RTR0MEM_EF_IN_FRONT
+ void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
+ RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
+ RTR0MEM_EF_NOMAN_FILLER);
+# else
+ /* Alignment must match allocation alignment in rtMemAlloc(). */
+ void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
+ pBlock->cbAligned - pBlock->cbUnaligned,
+ RTR0MEM_EF_NOMAN_FILLER);
+ if (pvWrong)
+ RTAssertDoPanic();
+ pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
+ RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
+ RTR0MEM_EF_NOMAN_FILLER);
+# endif
+ if (pvWrong)
+ RTAssertDoPanic();
+#endif
+
+#ifdef RTR0MEM_EF_FREE_FILL
+ /*
+ * Fill the user part of the block.
+ */
+ memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
+#endif
+
+#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
+ /*
+ * We're doing delayed freeing.
+ * That means we'll expand the E-fence to cover the entire block.
+ */
+ int rc = RTR0MemObjProtect(pBlock->hMemObj,
+# ifdef RTR0MEM_EF_IN_FRONT
+ RTR0MEM_EF_SIZE,
+# else
+ 0 /*offSub*/,
+# endif
+ RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE),
+ RTMEM_PROT_NONE);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Insert it into the free list and process pending frees.
+ */
+ rtR0MemBlockDelayInsert(pBlock);
+ while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
+ rtR0MemFreeBlock(pBlock, pszOp);
+ }
+ else /* Was passing the pBlock pointer to %d for 'cb'; log the size instead. */
+ rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%zu, rc=%d.\n", pv, pBlock->cbAligned, rc);
+
+#else /* !RTR0MEM_EF_FREE_DELAYED */
+ rtR0MemFreeBlock(pBlock, pszOp);
+#endif /* !RTR0MEM_EF_FREE_DELAYED */
+ }
+ else
+ rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
+}
+
+
+/**
+ * Internal realloc.
+ *
+ * Implemented strictly as allocate-new + copy + free-old; there is no
+ * in-place resize path here.
+ */
+static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
+ const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
+{
+ /*
+ * Allocate new and copy.
+ */
+ if (!pvOld)
+ return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
+ if (!cbNew)
+ {
+ /* realloc(pv, 0) degenerates into a free. */
+ rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
+ return NULL;
+ }
+
+ /*
+ * Get the block, allocate the new, copy the data, free the old one.
+ */
+ PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
+ if (pBlock)
+ {
+ void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
+ if (pvRet)
+ {
+ /* Copy only what fits in both the old and the new block. */
+ memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
+ rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
+ }
+ return pvRet;
+ }
+ rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
+ return NULL;
+}
+
+
+
+
+RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ /* Same path as RTMemEfAlloc; only the log label differs. */
+ return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ /* Use the correct op label (was "TmpAlloc"; matches RTMemEfTmpAllocZNP). */
+ return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+/* Public RTMemEf* API wrappers (source-position variants). Each records
+ the caller's return address for the tracking block. */
+RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ if (pv)
+ rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ /* 16-byte granularity for blocks >= 16 bytes, pointer size otherwise. */
+ size_t cbAligned;
+ if (cbUnaligned >= 16)
+ cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
+ else
+ cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
+ return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ size_t cbAligned;
+ if (cbUnaligned >= 16)
+ cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
+ else
+ cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
+ return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ if (pv)
+ rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
+}
+
+
+RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
+ if (pvDst)
+ memcpy(pvDst, pvSrc, cb);
+ return pvDst;
+}
+
+
+RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
+ if (pvDst)
+ {
+ /* Copy the source and zero the extra tail. */
+ memcpy(pvDst, pvSrc, cbSrc);
+ memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
+ }
+ return pvDst;
+}
+
+
+
+
+/*
+ *
+ * The NP (no position) versions.
+ *
+ */
+
+
+
+/* The NP variants pass NULL/0/NULL in place of the RT_SRC_POS arguments,
+ i.e. no source position is recorded for the allocation. */
+RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
+{
+ if (pv)
+ rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
+{
+ /* Same alignment rule as RTMemEfAllocVar. */
+ size_t cbAligned;
+ if (cbUnaligned >= 16)
+ cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
+ else
+ cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
+ return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
+{
+ size_t cbAligned;
+ if (cbUnaligned >= 16)
+ cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
+ else
+ cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
+ return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
+{
+ return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
+{
+ if (pv)
+ rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
+}
+
+
+RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
+ if (pvDst)
+ memcpy(pvDst, pvSrc, cb);
+ return pvDst;
+}
+
+
+RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
+ if (pvDst)
+ {
+ /* Copy the source and zero the extra tail. */
+ memcpy(pvDst, pvSrc, cbSrc);
+ memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
+ }
+ return pvDst;
+}
+
diff --git a/src/VBox/Runtime/r0drv/alloc-r0drv.cpp b/src/VBox/Runtime/r0drv/alloc-r0drv.cpp
new file mode 100644
index 00000000..5c4d12b5
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/alloc-r0drv.cpp
@@ -0,0 +1,438 @@
+/* $Id: alloc-r0drv.cpp $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTMEM_NO_WRAP_TO_EF_APIS
+#include <iprt/mem.h>
+#include "internal/iprt.h"
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#ifdef RT_MORE_STRICT
+# include <iprt/mp.h>
+#endif
+#include <iprt/param.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include "r0drv/alloc-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#ifdef RT_STRICT
+# define RTR0MEM_STRICT
+#endif
+
+#ifdef RTR0MEM_STRICT
+# define RTR0MEM_FENCE_EXTRA 16
+#else
+# define RTR0MEM_FENCE_EXTRA 0
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef RTR0MEM_STRICT
+/** Fence data. */
+static uint8_t const g_abFence[RTR0MEM_FENCE_EXTRA] =
+{
+ 0x77, 0x88, 0x66, 0x99, 0x55, 0xaa, 0x44, 0xbb,
+ 0x33, 0xcc, 0x22, 0xdd, 0x11, 0xee, 0x00, 0xff
+};
+#endif
+
+
+/**
+ * Thin wrapper around rtR0MemAllocEx turning the status code into a
+ * NULL-or-pointer return.
+ *
+ * @returns Pointer to the allocated memory block header, NULL on failure.
+ * @param cb The number of bytes to allocate (sans header).
+ * @param fFlags The allocation flags.
+ */
+DECLINLINE(PRTMEMHDR) rtR0MemAlloc(size_t cb, uint32_t fFlags)
+{
+ PRTMEMHDR pHdr = NULL;
+ if (RT_SUCCESS(rtR0MemAllocEx(cb, fFlags, &pHdr)))
+ return pHdr;
+ return NULL;
+}
+
+
+RTDECL(void *) RTMemTmpAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ /* Temp allocations simply forward to the regular ring-0 allocator. */
+ return RTMemAllocTag(cb, pszTag);
+}
+RT_EXPORT_SYMBOL(RTMemTmpAllocTag);
+
+
+RTDECL(void *) RTMemTmpAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ return RTMemAllocZTag(cb, pszTag);
+}
+RT_EXPORT_SYMBOL(RTMemTmpAllocZTag);
+
+
+RTDECL(void) RTMemTmpFree(void *pv) RT_NO_THROW_DEF
+{
+ return RTMemFree(pv);
+}
+RT_EXPORT_SYMBOL(RTMemTmpFree);
+
+
+
+
+
+RTDECL(void *) RTMemAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdr;
+ RT_ASSERT_INTS_ON();
+ RT_NOREF_PV(pszTag);
+
+ /* RTR0MEM_FENCE_EXTRA is 0 in non-strict builds, so this adds fence
+ space only when the fence is actually checked. */
+ pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, 0);
+ if (pHdr)
+ {
+#ifdef RTR0MEM_STRICT
+ pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
+ memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
+#endif
+ return pHdr + 1; /* user memory starts right after the header */
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTMemAllocTag);
+
+
+RTDECL(void *) RTMemAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdr;
+ RT_ASSERT_INTS_ON();
+ RT_NOREF_PV(pszTag);
+
+ pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, RTMEMHDR_FLAG_ZEROED);
+ if (pHdr)
+ {
+#ifdef RTR0MEM_STRICT
+ pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
+ memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
+ /* Zero only the requested size so the fence just written stays intact. */
+ return memset(pHdr + 1, 0, cb);
+#else
+ /* No fence: zero the whole usable area the back end reported. */
+ return memset(pHdr + 1, 0, pHdr->cb);
+#endif
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTMemAllocZTag);
+
+
+RTDECL(void *) RTMemAllocVarTag(size_t cbUnaligned, const char *pszTag)
+{
+ size_t cbAligned;
+ if (cbUnaligned >= 16)
+ cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
+ else
+ cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
+ return RTMemAllocTag(cbAligned, pszTag);
+}
+RT_EXPORT_SYMBOL(RTMemAllocVarTag);
+
+
+RTDECL(void *) RTMemAllocZVarTag(size_t cbUnaligned, const char *pszTag)
+{
+ /* Same rounding rule as RTMemAllocVarTag, but zeroing allocator. */
+ size_t const cbGranularity = cbUnaligned >= 16 ? (size_t)16 : sizeof(void *);
+ return RTMemAllocZTag(RT_ALIGN_Z(cbUnaligned, cbGranularity), pszTag);
+}
+RT_EXPORT_SYMBOL(RTMemAllocZVarTag);
+
+
+RTDECL(void *) RTMemReallocTag(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdrOld;
+
+ /* Free. */
+ if (!cbNew && pvOld)
+ {
+ RTMemFree(pvOld);
+ return NULL;
+ }
+
+ /* Alloc. */
+ if (!pvOld)
+ return RTMemAllocTag(cbNew, pszTag);
+
+ /*
+ * Realloc.
+ */
+ pHdrOld = (PRTMEMHDR)pvOld - 1;
+ RT_ASSERT_PREEMPTIBLE();
+
+ if (pHdrOld->u32Magic == RTMEMHDR_MAGIC)
+ {
+ PRTMEMHDR pHdrNew;
+
+ /* If there is sufficient space in the old block and we don't cause
+ substantial internal fragmentation, reuse the old block. */
+ if ( pHdrOld->cb >= cbNew + RTR0MEM_FENCE_EXTRA
+ && pHdrOld->cb - (cbNew + RTR0MEM_FENCE_EXTRA) <= 128)
+ {
+ pHdrOld->cbReq = (uint32_t)cbNew; Assert(pHdrOld->cbReq == cbNew);
+#ifdef RTR0MEM_STRICT
+ /* Move the fence to the new end of the requested area. */
+ memcpy((uint8_t *)(pHdrOld + 1) + cbNew, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
+#endif
+ return pvOld;
+ }
+
+ /* Allocate a new block and copy over the content. */
+ pHdrNew = rtR0MemAlloc(cbNew + RTR0MEM_FENCE_EXTRA, 0);
+ if (pHdrNew)
+ {
+ size_t cbCopy = RT_MIN(pHdrOld->cb, pHdrNew->cb);
+ memcpy(pHdrNew + 1, pvOld, cbCopy);
+#ifdef RTR0MEM_STRICT
+ pHdrNew->cbReq = (uint32_t)cbNew; Assert(pHdrNew->cbReq == cbNew);
+ memcpy((uint8_t *)(pHdrNew + 1) + cbNew, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
+ /* Verify the old block's fence before the block is freed. */
+ AssertReleaseMsg(!memcmp((uint8_t *)(pHdrOld + 1) + pHdrOld->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
+ ("pHdr=%p pvOld=%p cbReq=%u cb=%u cbNew=%zu fFlags=%#x\n"
+ "fence: %.*Rhxs\n"
+ "expected: %.*Rhxs\n",
+ pHdrOld, pvOld, pHdrOld->cbReq, pHdrOld->cb, cbNew, pHdrOld->fFlags,
+ RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdrOld + 1) + pHdrOld->cbReq,
+ RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
+#endif
+ rtR0MemFree(pHdrOld);
+ return pHdrNew + 1;
+ }
+ }
+ else
+ AssertMsgFailed(("pHdrOld->u32Magic=%RX32 pvOld=%p cbNew=%#zx\n", pHdrOld->u32Magic, pvOld, cbNew));
+
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTMemReallocTag);
+
+
+RTDECL(void) RTMemFree(void *pv) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdr;
+ RT_ASSERT_INTS_ON();
+
+ /* NULL is a valid no-op input, like free(). */
+ if (!pv)
+ return;
+ pHdr = (PRTMEMHDR)pv - 1;
+ if (pHdr->u32Magic == RTMEMHDR_MAGIC)
+ {
+ /* Blocks from RTMemAllocExTag / RTMemExecAllocTag have their own
+ free routines and must not end up here. */
+ Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX));
+ Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_EXEC));
+#ifdef RTR0MEM_STRICT
+ /* Verify the fence behind the user area is intact. */
+ AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
+ ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
+ "fence: %.*Rhxs\n"
+ "expected: %.*Rhxs\n",
+ pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
+ RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
+ RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
+#endif
+ rtR0MemFree(pHdr);
+ }
+ else
+ AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
+}
+RT_EXPORT_SYMBOL(RTMemFree);
+
+
+
+
+
+
+RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdr;
+#ifdef RT_OS_SOLARIS /** @todo figure out why */
+ RT_ASSERT_INTS_ON();
+#else
+ RT_ASSERT_PREEMPTIBLE();
+#endif
+ RT_NOREF_PV(pszTag);
+
+
+ /* Ask the back end for executable memory via RTMEMHDR_FLAG_EXEC. */
+ pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, RTMEMHDR_FLAG_EXEC);
+ if (pHdr)
+ {
+#ifdef RTR0MEM_STRICT
+ pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
+ memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
+#endif
+ return pHdr + 1;
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTMemExecAllocTag);
+
+
+RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdr;
+ RT_ASSERT_INTS_ON();
+ RT_NOREF_PV(cb); /* size is carried in the header, parameter unused */
+
+ if (!pv)
+ return;
+ pHdr = (PRTMEMHDR)pv - 1;
+ if (pHdr->u32Magic == RTMEMHDR_MAGIC)
+ {
+ Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX));
+#ifdef RTR0MEM_STRICT
+ /* Verify the fence behind the user area is intact. */
+ AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
+ ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
+ "fence: %.*Rhxs\n"
+ "expected: %.*Rhxs\n",
+ pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
+ RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
+ RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
+#endif
+ rtR0MemFree(pHdr);
+ }
+ else
+ AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
+}
+RT_EXPORT_SYMBOL(RTMemExecFree);
+
+
+
+
+/**
+ * Extended allocation API: converts RTMEMALLOCEX_FLAGS_* into header flags,
+ * allocates via the back end and sets up zeroing/fencing.
+ *
+ * @returns IPRT status code (VERR_NO_EXEC_MEMORY for failed exec requests).
+ */
+RTDECL(int) RTMemAllocExTag(size_t cb, size_t cbAlignment, uint32_t fFlags, const char *pszTag, void **ppv) RT_NO_THROW_DEF
+{
+ uint32_t fHdrFlags = RTMEMHDR_FLAG_ALLOC_EX;
+ PRTMEMHDR pHdr;
+ int rc;
+ RT_NOREF_PV(pszTag);
+
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+ if (!(fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC))
+ RT_ASSERT_INTS_ON();
+
+ /*
+ * Fake up some alignment support.
+ */
+ AssertMsgReturn(cbAlignment <= sizeof(void *), ("%zu (%#x)\n", cbAlignment, cbAlignment), VERR_UNSUPPORTED_ALIGNMENT);
+ if (cb < cbAlignment)
+ cb = cbAlignment;
+
+ /*
+ * Validate and convert flags.
+ */
+ AssertMsgReturn(!(fFlags & ~RTMEMALLOCEX_FLAGS_VALID_MASK_R0), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
+ if (fFlags & RTMEMALLOCEX_FLAGS_ZEROED)
+ fHdrFlags |= RTMEMHDR_FLAG_ZEROED;
+ if (fFlags & RTMEMALLOCEX_FLAGS_EXEC)
+ fHdrFlags |= RTMEMHDR_FLAG_EXEC;
+ if (fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC)
+ fHdrFlags |= RTMEMHDR_FLAG_ANY_CTX_ALLOC;
+ if (fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_FREE)
+ fHdrFlags |= RTMEMHDR_FLAG_ANY_CTX_FREE;
+
+ /*
+ * Do the allocation.
+ */
+ rc = rtR0MemAllocEx(cb + RTR0MEM_FENCE_EXTRA, fHdrFlags, &pHdr);
+ if (RT_SUCCESS(rc))
+ {
+ void *pv;
+
+ Assert(pHdr->cbReq == cb + RTR0MEM_FENCE_EXTRA);
+ /* Check against the converted header flags, not the caller's
+ RTMEMALLOCEX_FLAGS_* values - they live in different bit namespaces. */
+ Assert((pHdr->fFlags & fHdrFlags) == fHdrFlags);
+
+ /*
+ * Calc user pointer, initialize the memory if requested, and if
+ * memory strictness is enable set up the fence.
+ */
+ pv = pHdr + 1;
+ *ppv = pv;
+ if (fHdrFlags & RTMEMHDR_FLAG_ZEROED)
+ memset(pv, 0, pHdr->cb);
+
+#ifdef RTR0MEM_STRICT
+ pHdr->cbReq = (uint32_t)cb;
+ memcpy((uint8_t *)pv + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
+#endif
+ }
+ else if (rc == VERR_NO_MEMORY && (fFlags & RTMEMALLOCEX_FLAGS_EXEC))
+ rc = VERR_NO_EXEC_MEMORY;
+
+ RT_ASSERT_PREEMPT_CPUID();
+ return rc;
+}
+RT_EXPORT_SYMBOL(RTMemAllocExTag);
+
+
+/**
+ * Counterpart to RTMemAllocExTag; frees a block carrying RTMEMHDR_FLAG_ALLOC_EX.
+ */
+RTDECL(void) RTMemFreeEx(void *pv, size_t cb) RT_NO_THROW_DEF
+{
+ PRTMEMHDR pHdr;
+ RT_NOREF_PV(cb);
+
+ if (!pv)
+ return;
+
+ AssertPtr(pv);
+ pHdr = (PRTMEMHDR)pv - 1;
+ if (pHdr->u32Magic == RTMEMHDR_MAGIC)
+ {
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+
+ Assert(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX);
+ if (!(pHdr->fFlags & RTMEMHDR_FLAG_ANY_CTX_FREE))
+ RT_ASSERT_INTS_ON();
+ /* Was logging pHdr->cb (uint32_t) with %zu and labelling it cbReq;
+ format the field actually being compared with a matching specifier. */
+ AssertMsg(pHdr->cbReq == cb, ("cbReq=%u cb=%zu\n", pHdr->cbReq, cb));
+
+#ifdef RTR0MEM_STRICT
+ AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
+ ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
+ "fence: %.*Rhxs\n"
+ "expected: %.*Rhxs\n",
+ pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
+ RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
+ RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
+#endif
+ rtR0MemFree(pHdr);
+ RT_ASSERT_PREEMPT_CPUID();
+ }
+ else
+ AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
+}
+RT_EXPORT_SYMBOL(RTMemFreeEx);
+
diff --git a/src/VBox/Runtime/r0drv/alloc-r0drv.h b/src/VBox/Runtime/r0drv/alloc-r0drv.h
new file mode 100644
index 00000000..80aca678
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/alloc-r0drv.h
@@ -0,0 +1,104 @@
+/* $Id: alloc-r0drv.h $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_alloc_r0drv_h
+#define IPRT_INCLUDED_SRC_r0drv_alloc_r0drv_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/cdefs.h>
+#include <iprt/types.h>
+#include <iprt/mem.h>
+#include "internal/magics.h"
+
+RT_C_DECLS_BEGIN
+
+/**
+ * Header preceding every memory block handed out by this back end; the
+ * user pointer is the address immediately after this structure.
+ */
+typedef struct RTMEMHDR
+{
+ /** Magic (RTMEMHDR_MAGIC). */
+ uint32_t u32Magic;
+ /** Block flags (RTMEMHDR_FLAG_*). */
+ uint32_t fFlags;
+ /** The actual size of the block, header not included. */
+ uint32_t cb;
+ /** The requested allocation size. */
+ uint32_t cbReq;
+} RTMEMHDR, *PRTMEMHDR;
+
+
+/** @name RTMEMHDR::fFlags.
+ * @{ */
+/** Clear the allocated memory. */
+#define RTMEMHDR_FLAG_ZEROED RT_BIT(0)
+/** Executable flag. */
+#define RTMEMHDR_FLAG_EXEC RT_BIT(1)
+/** Use allocation method suitable for any context. */
+#define RTMEMHDR_FLAG_ANY_CTX_ALLOC RT_BIT(2)
+/** Use allocation method which allow for freeing in any context. */
+#define RTMEMHDR_FLAG_ANY_CTX_FREE RT_BIT(3)
+/** Both alloc and free in any context (or we're just darn lazy). */
+#define RTMEMHDR_FLAG_ANY_CTX (RTMEMHDR_FLAG_ANY_CTX_ALLOC | RTMEMHDR_FLAG_ANY_CTX_FREE)
+/** Indicate that it was allocated by rtR0MemAllocExTag. */
+#define RTMEMHDR_FLAG_ALLOC_EX RT_BIT(4)
+#ifdef RT_OS_LINUX
+/** Linux: Allocated using vm_area hacks. */
+# define RTMEMHDR_FLAG_EXEC_VM_AREA RT_BIT(29)
+/** Linux: Allocated from the special heap for executable memory. */
+# define RTMEMHDR_FLAG_EXEC_HEAP RT_BIT(30)
+/** Linux: Allocated by kmalloc() instead of vmalloc(). */
+# define RTMEMHDR_FLAG_KMALLOC RT_BIT(31)
+#endif
+/** @} */
+
+
+/**
+ * Heap allocation back end for ring-0.
+ *
+ * @returns IPRT status code. VERR_NO_MEMORY suffices for RTMEMHDR_FLAG_EXEC,
+ * the caller will change it to VERR_NO_EXEC_MEMORY when appropriate.
+ *
+ * @param cb The amount of memory requested by the user. This does
+ * not include the header.
+ * @param fFlags The allocation flags and more. These should be
+ * assigned to RTMEMHDR::fFlags together with any flags
+ * the backend might be using.
+ * @param ppHdr Where to return the memory header on success.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr);
+
+/**
+ * Free memory allocated by rtR0MemAllocEx.
+ * @param pHdr The memory block to free. (Never NULL.)
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr);
+
+RT_C_DECLS_END
+#endif /* !IPRT_INCLUDED_SRC_r0drv_alloc_r0drv_h */
+
diff --git a/src/VBox/Runtime/r0drv/darwin/Makefile.kup b/src/VBox/Runtime/r0drv/darwin/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/darwin/RTLogWriteDebugger-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/RTLogWriteDebugger-r0drv-darwin.cpp
new file mode 100644
index 00000000..5263d216
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/RTLogWriteDebugger-r0drv-darwin.cpp
@@ -0,0 +1,42 @@
+/* $Id: RTLogWriteDebugger-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Log To Debugger, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+
+RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+ /* kprintf takes no length argument, so bound the write via precision. */
+ kprintf("%.*s", (int)cb, pch);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/RTLogWriteStdOut-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/RTLogWriteStdOut-r0drv-darwin.cpp
new file mode 100644
index 00000000..169d2056
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/RTLogWriteStdOut-r0drv-darwin.cpp
@@ -0,0 +1,42 @@
+/* $Id: RTLogWriteStdOut-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Log To StdOut, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+
+RTDECL(void) RTLogWriteStdOut(const char *pch, size_t cb)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+ /* Kernel printf takes no length argument; bound the write via precision. */
+ printf("%.*s", (int)cb, pch);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/alloc-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/alloc-r0drv-darwin.cpp
new file mode 100644
index 00000000..843c068a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/alloc-r0drv-darwin.cpp
@@ -0,0 +1,191 @@
+/* $Id: alloc-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations" /* (IOMallocContiguous et al are deprecated) */
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+#include <iprt/memobj.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/thread.h>
+#include "r0drv/alloc-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Extended header used for headers marked with RTMEMHDR_FLAG_EXEC.
+ *
+ * This is used with allocating executable memory, for things like generated
+ * code and loaded modules.
+ */
+typedef struct RTMEMDARWINHDREX
+{
+    /** The associated memory object. */
+    RTR0MEMOBJ hMemObj;
+    /** Alignment padding. */
+    uint8_t abPadding[ARCH_BITS == 32 ? 12 : 8];
+    /** The header we present to the generic API.  Keep this last: the user
+     * data follows it directly and rtR0MemFree() recovers this struct from
+     * the generic header via RT_FROM_MEMBER. */
+    RTMEMHDR Hdr;
+} RTMEMDARWINHDREX;
+AssertCompileSize(RTMEMDARWINHDREX, 32);
+/** Pointer to an extended memory header. */
+typedef RTMEMDARWINHDREX *PRTMEMDARWINHDREX;
+
+
+/**
+ * OS specific allocation function.
+ *
+ * @returns IPRT status code.  VERR_NOT_SUPPORTED for any-context requests,
+ *          VERR_NO_MEMORY or RTR0MemObjAllocPage status on allocation failure.
+ * @param   cb      Number of bytes requested by the caller (header excluded).
+ * @param   fFlags  RTMEMHDR_FLAG_XXX.  RTMEMHDR_FLAG_EXEC selects the memory
+ *                  object backed path with an extended header.
+ * @param   ppHdr   Where to return the header; user data follows it.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    if (RT_UNLIKELY(fFlags & RTMEMHDR_FLAG_ANY_CTX))
+        /* NOTE(review): returns without IPRT_DARWIN_RESTORE_EFL_AC(); harmless
+           since nothing was changed yet, but inconsistent with other exits. */
+        return VERR_NOT_SUPPORTED;
+
+    PRTMEMHDR pHdr;
+    if (fFlags & RTMEMHDR_FLAG_EXEC)
+    {
+        /* Executable memory: back it with a page-granular memory object whose
+           handle we stash in the extended header for rtR0MemFree(). */
+        RTR0MEMOBJ hMemObj;
+        int rc = RTR0MemObjAllocPage(&hMemObj, cb + sizeof(RTMEMDARWINHDREX), true /*fExecutable*/);
+        if (RT_FAILURE(rc))
+        {
+            IPRT_DARWIN_RESTORE_EFL_AC();
+            return rc;
+        }
+        PRTMEMDARWINHDREX pExHdr = (PRTMEMDARWINHDREX)RTR0MemObjAddress(hMemObj);
+        pExHdr->hMemObj = hMemObj;
+        pHdr = &pExHdr->Hdr;
+#if 1 /*fExecutable isn't currently honored above. */
+        rc = RTR0MemObjProtect(hMemObj, 0, RTR0MemObjSize(hMemObj), RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
+        AssertRC(rc);
+#endif
+    }
+    else
+    {
+        pHdr = (PRTMEMHDR)IOMalloc(cb + sizeof(*pHdr));
+        if (RT_UNLIKELY(!pHdr))
+        {
+            printf("rtR0MemAllocEx(%#zx, %#x) failed\n", cb + sizeof(*pHdr), fFlags);
+            IPRT_DARWIN_RESTORE_EFL_AC();
+            return VERR_NO_MEMORY;
+        }
+    }
+
+    /* Initialize the generic header; cb is the user size, the real allocation
+       is cb plus the (extended) header, matching what rtR0MemFree() frees. */
+    pHdr->u32Magic = RTMEMHDR_MAGIC;
+    pHdr->fFlags = fFlags;
+    pHdr->cb = cb;
+    pHdr->cbReq = cb;
+    *ppHdr = pHdr;
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * OS specific free function.
+ *
+ * @param   pHdr    The header of the allocation to free (as returned by
+ *                  rtR0MemAllocEx).
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /* Invalidate the magic so stale pointers are easier to catch. */
+    pHdr->u32Magic += 1;
+    if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
+    {
+        /* Executable allocation: recover the extended header wrapping this
+           one and free the backing memory object. */
+        PRTMEMDARWINHDREX pExHdr = RT_FROM_MEMBER(pHdr, RTMEMDARWINHDREX, Hdr);
+        int rc = RTR0MemObjFree(pExHdr->hMemObj, false /*fFreeMappings*/);
+        AssertRC(rc);
+    }
+    else
+        IOFree(pHdr, pHdr->cb + sizeof(*pHdr));
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+/**
+ * Allocates page aligned, physically contiguous memory that the code below
+ * insists must reside under 4GB.
+ *
+ * @returns Ring-0 virtual address of the allocation, NULL on failure.
+ * @param   pPhys   Where to return the physical address of the allocation.
+ * @param   cb      Number of bytes to allocate; rounded up to a page multiple.
+ */
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+    /*
+     * validate input.
+     */
+    AssertPtr(pPhys);
+    Assert(cb > 0);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Allocate the memory and ensure that the API is still providing
+     * memory that's always below 4GB.
+     */
+    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
+    IOPhysicalAddress PhysAddr;
+    void *pv = IOMallocContiguous(cb, PAGE_SIZE, &PhysAddr);
+    if (pv)
+    {
+        /* Reject (and free) the block if it is above 4GB or misaligned. */
+        if (PhysAddr + (cb - 1) <= (IOPhysicalAddress)0xffffffff)
+        {
+            if (!((uintptr_t)pv & PAGE_OFFSET_MASK))
+            {
+                *pPhys = PhysAddr;
+                IPRT_DARWIN_RESTORE_EFL_AC();
+                return pv;
+            }
+            AssertMsgFailed(("IOMallocContiguous didn't return a page aligned address - %p!\n", pv));
+        }
+        else
+            AssertMsgFailed(("IOMallocContiguous returned high address! PhysAddr=%RX64 cb=%#zx\n", (uint64_t)PhysAddr, cb));
+        IOFreeContiguous(pv, cb);
+    }
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return NULL;
+}
+
+
+/**
+ * Frees memory allocated by RTMemContAlloc().
+ *
+ * @param   pv      The allocation to free; NULL is quietly ignored.
+ * @param   cb      The size passed to RTMemContAlloc(); rounded up to a page
+ *                  multiple here the same way the allocator did.
+ */
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    if (pv)
+    {
+        Assert(cb > 0);
+        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
+        IPRT_DARWIN_SAVE_EFL_AC();
+
+        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
+        IOFreeContiguous(pv, cb);
+
+        IPRT_DARWIN_RESTORE_EFL_AC();
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/assert-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/assert-r0drv-darwin.cpp
new file mode 100644
index 00000000..a4547a81
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/assert-r0drv-darwin.cpp
@@ -0,0 +1,72 @@
+/* $Id: assert-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, Darwin.
+ */
+
+/*
+ * Copyright (C) 2007-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/assert.h>
+
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include <iprt/stdarg.h>
+#include <iprt/string.h>
+
+#include "internal/assert.h"
+
+
+/**
+ * Prints the first (location) part of an assertion message to the kernel log.
+ *
+ * @param   pszExpr     The expression that failed.
+ * @param   uLine       Source line number.
+ * @param   pszFile     Source file name.
+ * @param   pszFunction Function name.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+    printf("\r\n!!Assertion Failed!!\r\n"
+           "Expression: %s\r\n"
+           "Location  : %s(%u) %s\r\n",
+           pszExpr, pszFile, uLine, pszFunction);
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+/**
+ * Prints the second (custom) part of an assertion message to the kernel log.
+ *
+ * @param   fInitial    Whether this is the initial message; unused here.
+ * @param   pszFormat   Format string for the custom message.
+ * @param   va          Format arguments.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+    char szMsg[256];
+
+    /* Format into a bounded local buffer; the explicit terminator below is
+       belt-and-braces on top of the size limit. */
+    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
+    szMsg[sizeof(szMsg) - 1] = '\0';
+    printf("%s", szMsg);
+
+    NOREF(fInitial);
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+/**
+ * Panics the system with the two assembled assertion message buffers
+ * (g_szRTAssertMsg1/2 come from internal/assert.h).
+ */
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+    panic("%s%s", g_szRTAssertMsg1, g_szRTAssertMsg2);
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/dbgkrnlinfo-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/dbgkrnlinfo-r0drv-darwin.cpp
new file mode 100644
index 00000000..b5fe476b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/dbgkrnlinfo-r0drv-darwin.cpp
@@ -0,0 +1,1083 @@
+/* $Id: dbgkrnlinfo-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Kernel Debug Information, R0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2011-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#ifdef IN_RING0
+# include "the-darwin-kernel.h"
+# include <sys/kauth.h>
+RT_C_DECLS_BEGIN /* Buggy 10.4 headers, fixed in 10.5. */
+# include <sys/kpi_mbuf.h>
+# include <net/kpi_interfacefilter.h>
+# include <sys/kpi_socket.h>
+# include <sys/kpi_socketfilter.h>
+RT_C_DECLS_END
+# include <sys/buf.h>
+# include <sys/vm.h>
+# include <sys/vnode_if.h>
+/*# include <sys/sysctl.h>*/
+# include <sys/systm.h>
+# include <vfs/vfs_support.h>
+/*# include <miscfs/specfs/specdev.h>*/
+#else
+# include <stdio.h> /* for printf */
+#endif
+
+#if !defined(IN_RING0) && !defined(DOXYGEN_RUNNING) /* A linking tweak for the testcase: */
+# include <iprt/cdefs.h>
+# undef RTR0DECL
+# define RTR0DECL(type) DECLHIDDEN(type) RTCALL
+#endif
+
+#include "internal/iprt.h"
+#include <iprt/dbg.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/assert.h>
+#include <iprt/file.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/formats/mach-o.h>
+#include "internal/magics.h"
+
+/** @def MY_CPU_TYPE
+ * The CPU type targeted by the compiler. */
+/** @def MY_CPU_SUBTYPE_ALL
+ * The "ALL" CPU subtype targeted by the compiler. */
+/** @def MY_MACHO_HEADER
+ * The Mach-O header targeted by the compiler. */
+/** @def MY_MACHO_MAGIC
+ * The Mach-O header magic we're targeting. */
+/** @def MY_SEGMENT_COMMAND
+ * The segment command targeted by the compiler. */
+/** @def MY_SECTION
+ * The section structure targeted by the compiler. */
+/** @def MY_NLIST
+ * The symbol table entry targeted by the compiler. */
+#ifdef RT_ARCH_X86
+# define MY_CPU_TYPE CPU_TYPE_I386
+# define MY_CPU_SUBTYPE_ALL CPU_SUBTYPE_I386_ALL
+# define MY_MACHO_HEADER mach_header_32_t
+# define MY_MACHO_MAGIC IMAGE_MACHO32_SIGNATURE
+# define MY_SEGMENT_COMMAND segment_command_32_t
+# define MY_SECTION section_32_t
+# define MY_NLIST macho_nlist_32_t
+
+#elif defined(RT_ARCH_AMD64)
+# define MY_CPU_TYPE CPU_TYPE_X86_64
+# define MY_CPU_SUBTYPE_ALL CPU_SUBTYPE_X86_64_ALL
+# define MY_MACHO_HEADER mach_header_64_t
+# define MY_MACHO_MAGIC IMAGE_MACHO64_SIGNATURE
+# define MY_SEGMENT_COMMAND segment_command_64_t
+# define MY_SECTION section_64_t
+# define MY_NLIST macho_nlist_64_t
+
+#else
+# error "Port me!"
+#endif
+
+/** @name Return macros that make it simpler to track down overly paranoid code.
+ * @{
+ */
+#ifdef DEBUG
+# define RETURN_VERR_BAD_EXE_FORMAT \
+ do { Assert(!g_fBreakpointOnError); return VERR_BAD_EXE_FORMAT; } while (0)
+# define RETURN_VERR_LDR_UNEXPECTED \
+ do { Assert(!g_fBreakpointOnError); return VERR_LDR_UNEXPECTED; } while (0)
+# define RETURN_VERR_LDR_ARCH_MISMATCH \
+ do { Assert(!g_fBreakpointOnError); return VERR_LDR_ARCH_MISMATCH; } while (0)
+#else
+# define RETURN_VERR_BAD_EXE_FORMAT do { return VERR_BAD_EXE_FORMAT; } while (0)
+# define RETURN_VERR_LDR_UNEXPECTED do { return VERR_LDR_UNEXPECTED; } while (0)
+# define RETURN_VERR_LDR_ARCH_MISMATCH do { return VERR_LDR_ARCH_MISMATCH; } while (0)
+#endif
+/** @} */
+
+#define VERR_LDR_UNEXPECTED (-641)
+
+#ifndef RT_OS_DARWIN
+# define MAC_OS_X_VERSION_MIN_REQUIRED 1050
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Our internal representation of the mach_kernel after loading its symbols
+ * and successfully resolving their addresses.
+ */
+typedef struct RTDBGKRNLINFOINT
+{
+    /** Magic value (RTDBGKRNLINFO_MAGIC). */
+    uint32_t u32Magic;
+    /** Reference counter. */
+    uint32_t volatile cRefs;
+
+    /** @name Result.
+     * @{ */
+    /** Pointer to the string table. */
+    char *pachStrTab;
+    /** The size of the string table. */
+    uint32_t cbStrTab;
+    /** The file offset of the string table. */
+    uint32_t offStrTab;
+    /** Pointer to the symbol table. */
+    MY_NLIST *paSyms;
+    /** The size of the symbol table. */
+    uint32_t cSyms;
+    /** The file offset of the symbol table. */
+    uint32_t offSyms;
+    /** Offset between link address and actual load address. */
+    uintptr_t offLoad;
+    /** @} */
+
+    /** @name Used during loading.
+     * @{ */
+    /** The file handle. */
+    RTFILE hFile;
+    /** The architecture image offset (fat_arch_t::offset). */
+    uint64_t offArch;
+    /** The architecture image size (fat_arch_t::size). */
+    uint32_t cbArch;
+    /** The number of load commands (mach_header_XX_t::ncmds). */
+    uint32_t cLoadCmds;
+    /** The size of the load commands. */
+    uint32_t cbLoadCmds;
+    /** The load commands. */
+    load_command_t *pLoadCmds;
+    /** Section pointer table (points into the load commands). */
+    MY_SECTION const *apSections[MACHO_MAX_SECT];
+    /** The number of sections. */
+    uint32_t cSections;
+    /** @} */
+
+    /** Buffer space. */
+    char abBuf[_4K];
+} RTDBGKRNLINFOINT;
+
+
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
+#ifdef DEBUG
+/** Set to true to break (assert) on the paranoid validation errors below. */
+static bool g_fBreakpointOnError = false;
+#endif
+
+
+/**
+ * Close and free up resources we no longer need.
+ *
+ * @param   pThis               The internal scratch data.
+ */
+static void rtR0DbgKrnlDarwinLoadDone(RTDBGKRNLINFOINT *pThis)
+{
+    RTFileClose(pThis->hFile);
+    pThis->hFile = NIL_RTFILE;
+
+    RTMemFree(pThis->pLoadCmds);
+    pThis->pLoadCmds = NULL;
+    /* The section pointers pointed into pLoadCmds, so wipe them too. */
+    memset((void *)&pThis->apSections[0], 0, sizeof(pThis->apSections[0]) * MACHO_MAX_SECT);
+}
+
+
+/**
+ * Looks up a kernel symbol.
+ *
+ * @returns The symbol address on success, 0 on failure.
+ * @param   pThis               The internal scratch data.
+ * @param   pszSymbol           The symbol to resolve.  Automatically prefixed
+ *                              with an underscore.
+ */
+static uintptr_t rtR0DbgKrnlDarwinLookup(RTDBGKRNLINFOINT *pThis, const char *pszSymbol)
+{
+    uint32_t const  cSyms = pThis->cSyms;
+    MY_NLIST const *pSym = pThis->paSyms;
+
+#if 1
+    /* linear search. */
+    for (uint32_t iSym = 0; iSym < cSyms; iSym++, pSym++)
+    {
+        /* Skip debug (stab) entries. */
+        if (pSym->n_type & MACHO_N_STAB)
+            continue;
+
+        /* Mach-O C symbols carry a leading underscore; match past it. */
+        const char *pszTabName= &pThis->pachStrTab[(uint32_t)pSym->n_un.n_strx];
+        if (   *pszTabName == '_'
+            && strcmp(pszTabName + 1, pszSymbol) == 0)
+            return pSym->n_value + pThis->offLoad;
+    }
+#else
+    /** @todo binary search. */
+
+#endif
+    return 0;
+}
+
+
+/* Rainy day: Find the right headers for these symbols ... if there are any. */
+extern "C" void ev_try_lock(void);
+extern "C" void OSMalloc(void);
+extern "C" void OSlibkernInit(void);
+extern "C" void kdp_set_interface(void);
+
+
+/**
+ * Check the symbol table against a set of known symbols.
+ *
+ * This is done to detect whether the on disk image and the in
+ * memory images matches. Mismatches could stem from user
+ * replacing the default kernel image on disk.
+ *
+ * @returns IPRT status code.
+ * @param   pThis               The internal scratch data.
+ */
+static int rtR0DbgKrnlDarwinCheckStandardSymbols(RTDBGKRNLINFOINT *pThis)
+{
+    /* In ring-0 each candle's link-time address is compared against the
+       resolved one; in ring-3 (testcase) only resolvability is checked. */
+    static struct
+    {
+        const char *pszName;
+        uintptr_t uAddr;
+    } const s_aStandardCandles[] =
+    {
+#ifdef IN_RING0
+# define KNOWN_ENTRY(a_Sym) { #a_Sym, (uintptr_t)&a_Sym }
+#else
+# define KNOWN_ENTRY(a_Sym) { #a_Sym, 0 }
+#endif
+        /* IOKit: */
+        KNOWN_ENTRY(IOMalloc),
+        KNOWN_ENTRY(IOFree),
+        KNOWN_ENTRY(IOSleep),
+        KNOWN_ENTRY(IORWLockAlloc),
+        KNOWN_ENTRY(IORecursiveLockLock),
+        KNOWN_ENTRY(IOSimpleLockAlloc),
+        KNOWN_ENTRY(PE_cpu_halt),
+        KNOWN_ENTRY(gIOKitDebug),
+        KNOWN_ENTRY(gIOServicePlane),
+        KNOWN_ENTRY(ev_try_lock),
+
+        /* Libkern: */
+        KNOWN_ENTRY(OSAddAtomic),
+        KNOWN_ENTRY(OSBitAndAtomic),
+        KNOWN_ENTRY(OSBitOrAtomic),
+        KNOWN_ENTRY(OSBitXorAtomic),
+        KNOWN_ENTRY(OSCompareAndSwap),
+        KNOWN_ENTRY(OSMalloc),
+        KNOWN_ENTRY(OSlibkernInit),
+        KNOWN_ENTRY(bcmp),
+        KNOWN_ENTRY(copyout),
+        KNOWN_ENTRY(copyin),
+        KNOWN_ENTRY(kprintf),
+        KNOWN_ENTRY(printf),
+        KNOWN_ENTRY(lck_grp_alloc_init),
+        KNOWN_ENTRY(lck_mtx_alloc_init),
+        KNOWN_ENTRY(lck_rw_alloc_init),
+        KNOWN_ENTRY(lck_spin_alloc_init),
+        KNOWN_ENTRY(osrelease),
+        KNOWN_ENTRY(ostype),
+        KNOWN_ENTRY(panic),
+        KNOWN_ENTRY(strprefix),
+        //KNOWN_ENTRY(sysctlbyname), - we get kernel_sysctlbyname from the 10.10+ kernels.
+        KNOWN_ENTRY(vsscanf),
+        KNOWN_ENTRY(page_mask),
+
+        /* Mach: */
+        KNOWN_ENTRY(absolutetime_to_nanoseconds),
+        KNOWN_ENTRY(assert_wait),
+        KNOWN_ENTRY(clock_delay_until),
+        KNOWN_ENTRY(clock_get_uptime),
+        KNOWN_ENTRY(current_task),
+        KNOWN_ENTRY(current_thread),
+        KNOWN_ENTRY(kernel_task),
+        KNOWN_ENTRY(lck_mtx_sleep),
+        KNOWN_ENTRY(lck_rw_sleep),
+        KNOWN_ENTRY(lck_spin_sleep),
+        KNOWN_ENTRY(mach_absolute_time),
+        KNOWN_ENTRY(semaphore_create),
+        KNOWN_ENTRY(task_reference),
+        KNOWN_ENTRY(thread_block),
+        KNOWN_ENTRY(thread_reference),
+        KNOWN_ENTRY(thread_terminate),
+        KNOWN_ENTRY(thread_wakeup_prim),
+
+        /* BSDKernel: */
+        KNOWN_ENTRY(buf_size),
+        KNOWN_ENTRY(copystr),
+        KNOWN_ENTRY(current_proc),
+        KNOWN_ENTRY(ifnet_hdrlen),
+        KNOWN_ENTRY(ifnet_set_promiscuous),
+        KNOWN_ENTRY(kauth_getuid),
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
+        KNOWN_ENTRY(kauth_cred_unref),
+#else
+        KNOWN_ENTRY(kauth_cred_rele),
+#endif
+        KNOWN_ENTRY(mbuf_data),
+        KNOWN_ENTRY(msleep),
+        KNOWN_ENTRY(nanotime),
+        KNOWN_ENTRY(nop_close),
+        KNOWN_ENTRY(proc_pid),
+        KNOWN_ENTRY(sock_accept),
+        KNOWN_ENTRY(sockopt_name),
+        //KNOWN_ENTRY(spec_write),
+        KNOWN_ENTRY(suword),
+        //KNOWN_ENTRY(sysctl_int),
+        KNOWN_ENTRY(uio_rw),
+        KNOWN_ENTRY(vfs_flags),
+        KNOWN_ENTRY(vfs_name),
+        KNOWN_ENTRY(vfs_statfs),
+        KNOWN_ENTRY(VNOP_READ),
+        KNOWN_ENTRY(uio_create),
+        KNOWN_ENTRY(uio_addiov),
+        KNOWN_ENTRY(uio_free),
+        KNOWN_ENTRY(vnode_get),
+        KNOWN_ENTRY(vnode_open),
+        KNOWN_ENTRY(vnode_ref),
+        KNOWN_ENTRY(vnode_rele),
+        KNOWN_ENTRY(vnop_close_desc),
+        KNOWN_ENTRY(wakeup),
+        KNOWN_ENTRY(wakeup_one),
+
+        /* Unsupported: */
+        KNOWN_ENTRY(kdp_set_interface),
+        KNOWN_ENTRY(pmap_find_phys),
+        KNOWN_ENTRY(vm_map),
+        KNOWN_ENTRY(vm_protect),
+        KNOWN_ENTRY(vm_region),
+        KNOWN_ENTRY(vm_map_unwire), /* vm_map_wire has an alternative symbol, vm_map_wire_external, in 10.11 */
+        KNOWN_ENTRY(PE_kputc),
+        KNOWN_ENTRY(kernel_map),
+        KNOWN_ENTRY(kernel_pmap),
+    };
+
+    for (unsigned i = 0; i < RT_ELEMENTS(s_aStandardCandles); i++)
+    {
+        uintptr_t uAddr = rtR0DbgKrnlDarwinLookup(pThis, s_aStandardCandles[i].pszName);
+#ifdef IN_RING0
+        if (uAddr != s_aStandardCandles[i].uAddr)
+#else
+        if (uAddr == 0)
+#endif
+        {
+            AssertLogRelMsgFailed(("%s (%p != %p)\n", s_aStandardCandles[i].pszName, uAddr, s_aStandardCandles[i].uAddr));
+            return VERR_INTERNAL_ERROR_2;
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads and validates the symbol and string tables.
+ *
+ * NOTE(review): on failure the partially allocated paSyms/pachStrTab buffers
+ * are left in pThis — presumably the caller frees them via its common cleanup
+ * path; confirm against the caller.
+ *
+ * @returns IPRT status code.
+ * @param   pThis               The internal scratch data.
+ * @param   pszKernelFile       The name of the kernel file.
+ */
+static int rtR0DbgKrnlDarwinLoadSymTab(RTDBGKRNLINFOINT *pThis, const char *pszKernelFile)
+{
+    /*
+     * Load the tables.
+     */
+    pThis->paSyms = (MY_NLIST *)RTMemAllocZ(pThis->cSyms * sizeof(MY_NLIST));
+    if (!pThis->paSyms)
+        return VERR_NO_MEMORY;
+
+    int rc = RTFileReadAt(pThis->hFile, pThis->offArch + pThis->offSyms,
+                          pThis->paSyms, pThis->cSyms * sizeof(MY_NLIST), NULL);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /* +1 so the table is always zero terminated even if the file lies. */
+    pThis->pachStrTab = (char *)RTMemAllocZ(pThis->cbStrTab + 1);
+    if (!pThis->pachStrTab)
+        return VERR_NO_MEMORY;
+
+    rc = RTFileReadAt(pThis->hFile, pThis->offArch + pThis->offStrTab,
+                      pThis->pachStrTab, pThis->cbStrTab, NULL);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /*
+     * The first string table symbol must be a zero length name.
+     */
+    if (pThis->pachStrTab[0] != '\0')
+        RETURN_VERR_BAD_EXE_FORMAT;
+
+    /*
+     * Validate the symbol table.
+     */
+    const char *pszPrev = "";
+    uint32_t const cSyms = pThis->cSyms;
+    MY_NLIST const *pSym = pThis->paSyms;
+    for (uint32_t iSym = 0; iSym < cSyms; iSym++, pSym++)
+    {
+        if ((uint32_t)pSym->n_un.n_strx >= pThis->cbStrTab)
+        {
+            printf("RTR0DbgKrnlInfoOpen: %s: Symbol #%u has a bad string table index: %#x vs cbStrTab=%#x\n",
+                   pszKernelFile, iSym, pSym->n_un.n_strx, pThis->cbStrTab);
+            RETURN_VERR_BAD_EXE_FORMAT;
+        }
+        const char *pszSym = &pThis->pachStrTab[(uint32_t)pSym->n_un.n_strx];
+#ifdef IN_RING3
+        RTAssertMsg2("%05i: %02x:%08llx %02x %04x %s\n", iSym, pSym->n_sect, (uint64_t)pSym->n_value, pSym->n_type, pSym->n_desc, pszSym);
+#endif
+
+        /* The table must be sorted by name; the lookup relies on that order. */
+        if (strcmp(pszSym, pszPrev) < 0)
+            RETURN_VERR_BAD_EXE_FORMAT; /* not sorted */
+
+        if (!(pSym->n_type & MACHO_N_STAB))
+        {
+            switch (pSym->n_type & MACHO_N_TYPE)
+            {
+                case MACHO_N_SECT:
+                    if (pSym->n_sect == MACHO_NO_SECT)
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Symbol #%u '%s' problem: n_sect = MACHO_NO_SECT\n",
+                               pszKernelFile, iSym, pszSym);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    if (pSym->n_sect > pThis->cSections)
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Symbol #%u '%s' problem: n_sect (%u) is higher than cSections (%u)\n",
+                               pszKernelFile, iSym, pszSym, pSym->n_sect, pThis->cSections);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    if (pSym->n_desc & ~(REFERENCED_DYNAMICALLY | N_WEAK_DEF))
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Symbol #%u '%s' problem: Unexpected value n_desc=%#x\n",
+                               pszKernelFile, iSym, pszSym, pSym->n_desc);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    if (   pSym->n_value < pThis->apSections[pSym->n_sect - 1]->addr
+                        && strcmp(pszSym, "__mh_execute_header"))    /* in 10.8 it's no longer absolute (PIE?). */
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Symbol #%u '%s' problem: n_value (%#llx) < section addr (%#llx)\n",
+                               pszKernelFile, iSym, pszSym, pSym->n_value, pThis->apSections[pSym->n_sect - 1]->addr);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    if (      pSym->n_value - pThis->apSections[pSym->n_sect - 1]->addr
+                           > pThis->apSections[pSym->n_sect - 1]->size
+                        && strcmp(pszSym, "__mh_execute_header"))    /* see above. */
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Symbol #%u '%s' problem: n_value (%#llx) >= end of section (%#llx + %#llx)\n",
+                               pszKernelFile, iSym, pszSym, pSym->n_value, pThis->apSections[pSym->n_sect - 1]->addr,
+                               pThis->apSections[pSym->n_sect - 1]->size);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    break;
+
+                case MACHO_N_ABS:
+                    if (   pSym->n_sect != MACHO_NO_SECT
+                        && (   strcmp(pszSym, "__mh_execute_header") /* n_sect=1 in 10.7/amd64 */
+                            || pSym->n_sect > pThis->cSections) )
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Abs symbol #%u '%s' problem: n_sect (%u) is not MACHO_NO_SECT (cSections is %u)\n",
+                               pszKernelFile, iSym, pszSym, pSym->n_sect, pThis->cSections);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    if (pSym->n_desc & ~(REFERENCED_DYNAMICALLY | N_WEAK_DEF))
+                    {
+                        printf("RTR0DbgKrnlInfoOpen: %s: Abs symbol #%u '%s' problem: Unexpected value n_desc=%#x\n",
+                               pszKernelFile, iSym, pszSym, pSym->n_desc);
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+                    break;
+
+                case MACHO_N_UNDF:
+                    /* No undefined or common symbols in the kernel. */
+                    printf("RTR0DbgKrnlInfoOpen: %s: Unexpected undefined symbol #%u '%s'\n", pszKernelFile, iSym, pszSym);
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                case MACHO_N_INDR:
+                    /* No indirect symbols in the kernel. */
+                    printf("RTR0DbgKrnlInfoOpen: %s: Unexpected indirect symbol #%u '%s'\n", pszKernelFile, iSym, pszSym);
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                case MACHO_N_PBUD:
+                    /* No prebound symbols in the kernel. */
+                    printf("RTR0DbgKrnlInfoOpen: %s: Unexpected prebound symbol #%u '%s'\n", pszKernelFile, iSym, pszSym);
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                default:
+                    printf("RTR0DbgKrnlInfoOpen: %s: Unexpected symbol n_type %#x for symbol #%u '%s'\n",
+                           pszKernelFile, pSym->n_type, iSym, pszSym);
+                    RETURN_VERR_BAD_EXE_FORMAT;
+            }
+        }
+        /* else: Ignore debug symbols. */
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads the load commands and validates them.
+ *
+ * Fills in offStrTab/cbStrTab/offSyms/cSyms from LC_SYMTAB and the
+ * apSections/cSections table from the segment commands.
+ *
+ * @returns IPRT status code.
+ * @param   pThis               The internal scratch data.
+ */
+static int rtR0DbgKrnlDarwinLoadCommands(RTDBGKRNLINFOINT *pThis)
+{
+    pThis->offStrTab = 0;
+    pThis->cbStrTab = 0;
+    pThis->offSyms = 0;
+    pThis->cSyms = 0;
+    pThis->cSections = 0;
+
+    pThis->pLoadCmds = (load_command_t *)RTMemAlloc(pThis->cbLoadCmds);
+    if (!pThis->pLoadCmds)
+        return VERR_NO_MEMORY;
+
+    int rc = RTFileReadAt(pThis->hFile, pThis->offArch + sizeof(MY_MACHO_HEADER),
+                          pThis->pLoadCmds, pThis->cbLoadCmds, NULL);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /*
+     * Validate the relevant commands, picking up sections and the symbol
+     * table location.
+     */
+    load_command_t const *pCmd = pThis->pLoadCmds;
+    for (uint32_t iCmd = 0; ; iCmd++)
+    {
+        /* cmd index & offset. */
+        uintptr_t offCmd = (uintptr_t)pCmd - (uintptr_t)pThis->pLoadCmds;
+        if (offCmd == pThis->cbLoadCmds && iCmd == pThis->cLoadCmds)
+            break;
+        if (offCmd + sizeof(*pCmd) > pThis->cbLoadCmds)
+            RETURN_VERR_BAD_EXE_FORMAT;
+        if (iCmd >= pThis->cLoadCmds)
+            RETURN_VERR_BAD_EXE_FORMAT;
+
+        /* cmdsize */
+        if (pCmd->cmdsize < sizeof(*pCmd))
+            RETURN_VERR_BAD_EXE_FORMAT;
+        if (pCmd->cmdsize > pThis->cbLoadCmds)
+            RETURN_VERR_BAD_EXE_FORMAT;
+        if (RT_ALIGN_32(pCmd->cmdsize, 4) != pCmd->cmdsize)
+            RETURN_VERR_BAD_EXE_FORMAT;
+
+        /* cmd */
+        switch (pCmd->cmd & ~LC_REQ_DYLD)
+        {
+            /* Validate and store the symbol table details. */
+            case LC_SYMTAB:
+            {
+                struct symtab_command const *pSymTab = (struct symtab_command const *)pCmd;
+                if (pSymTab->cmdsize != sizeof(*pSymTab))
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                /* Sanity caps keep the RTMemAllocZ calls in LoadSymTab bounded. */
+                if (pSymTab->nsyms > _1M)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                if (pSymTab->strsize > _2M)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                pThis->offStrTab = pSymTab->stroff;
+                pThis->cbStrTab = pSymTab->strsize;
+                pThis->offSyms = pSymTab->symoff;
+                pThis->cSyms = pSymTab->nsyms;
+                break;
+            }
+
+            /* Validate the segment. */
+#if ARCH_BITS == 32
+            case LC_SEGMENT_32:
+#elif ARCH_BITS == 64
+            case LC_SEGMENT_64:
+#else
+# error ARCH_BITS
+#endif
+            {
+                MY_SEGMENT_COMMAND const *pSeg = (MY_SEGMENT_COMMAND const *)pCmd;
+                if (pSeg->cmdsize < sizeof(*pSeg))
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                if (pSeg->segname[0] == '\0')
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                if (pSeg->nsects > MACHO_MAX_SECT)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                if (pSeg->nsects * sizeof(MY_SECTION) + sizeof(*pSeg) != pSeg->cmdsize)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                if (pSeg->flags & ~(SG_HIGHVM | SG_FVMLIB | SG_NORELOC | SG_PROTECTED_VERSION_1))
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                if (   pSeg->vmaddr != 0
+                    || !strcmp(pSeg->segname, "__PAGEZERO"))
+                {
+                    /* Overflow check on vmaddr + page-rounded vmsize. */
+                    if (pSeg->vmaddr + RT_ALIGN_Z(pSeg->vmsize, RT_BIT_32(12)) < pSeg->vmaddr)
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                }
+                else if (pSeg->vmsize)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                if (pSeg->maxprot & ~VM_PROT_ALL)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                if (pSeg->initprot & ~VM_PROT_ALL)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+
+                /* Validate the sections. */
+                uint32_t uAlignment = 0;
+                MY_SECTION const *paSects = (MY_SECTION const *)(pSeg + 1);
+                for (uint32_t i = 0; i < pSeg->nsects; i++)
+                {
+                    if (paSects[i].sectname[0] == '\0')
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    if (memcmp(paSects[i].segname, pSeg->segname, sizeof(pSeg->segname)))
+                        RETURN_VERR_BAD_EXE_FORMAT;
+
+                    switch (paSects[i].flags & SECTION_TYPE)
+                    {
+                        case S_REGULAR:
+                        case S_CSTRING_LITERALS:
+                        case S_NON_LAZY_SYMBOL_POINTERS:
+                        case S_MOD_INIT_FUNC_POINTERS:
+                        case S_MOD_TERM_FUNC_POINTERS:
+                        case S_COALESCED:
+                        case S_4BYTE_LITERALS:
+                            if (  pSeg->filesize != 0
+                                ? paSects[i].offset - pSeg->fileoff >= pSeg->filesize
+                                : paSects[i].offset - pSeg->fileoff != pSeg->filesize)
+                                RETURN_VERR_BAD_EXE_FORMAT;
+                            if (   paSects[i].addr != 0
+                                && paSects[i].offset - pSeg->fileoff != paSects[i].addr - pSeg->vmaddr)
+                                RETURN_VERR_BAD_EXE_FORMAT;
+                            break;
+
+                        case S_ZEROFILL:
+                            if (paSects[i].offset != 0)
+                                RETURN_VERR_BAD_EXE_FORMAT;
+                            break;
+
+                        /* not observed */
+                        case S_SYMBOL_STUBS:
+                        case S_INTERPOSING:
+                        case S_8BYTE_LITERALS:
+                        case S_16BYTE_LITERALS:
+                        case S_DTRACE_DOF:
+                        case S_LAZY_SYMBOL_POINTERS:
+                        case S_LAZY_DYLIB_SYMBOL_POINTERS:
+                            RETURN_VERR_LDR_UNEXPECTED;
+                        case S_GB_ZEROFILL:
+                            RETURN_VERR_LDR_UNEXPECTED;
+                        default:
+                            RETURN_VERR_BAD_EXE_FORMAT;
+                    }
+
+                    if (paSects[i].align > 12)
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    if (paSects[i].align > uAlignment)
+                        uAlignment = paSects[i].align;
+
+                    /* Add to the section table. */
+                    if (pThis->cSections == MACHO_MAX_SECT)
+                        RETURN_VERR_BAD_EXE_FORMAT;
+                    pThis->apSections[pThis->cSections++] = &paSects[i];
+                }
+
+                if (RT_ALIGN_Z(pSeg->vmaddr, RT_BIT_32(uAlignment)) != pSeg->vmaddr)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                if (   pSeg->filesize > RT_ALIGN_Z(pSeg->vmsize, RT_BIT_32(uAlignment))
+                    && pSeg->vmsize != 0)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                break;
+            }
+
+            case LC_UUID:
+                if (pCmd->cmdsize != sizeof(uuid_command))
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                break;
+
+            case LC_DYSYMTAB:
+            case LC_UNIXTHREAD:
+            case LC_CODE_SIGNATURE:
+            case LC_VERSION_MIN_MACOSX:
+            case LC_FUNCTION_STARTS:
+            case LC_MAIN:
+            case LC_DATA_IN_CODE:
+            case LC_SOURCE_VERSION:
+            case LC_ENCRYPTION_INFO_64:
+            case LC_LINKER_OPTION:
+            case LC_LINKER_OPTIMIZATION_HINT:
+            case LC_VERSION_MIN_TVOS:
+            case LC_VERSION_MIN_WATCHOS:
+            case LC_NOTE:
+            case LC_BUILD_VERSION:
+                break;
+
+            /* not observed */
+            case LC_SYMSEG:
+#if ARCH_BITS == 32
+            case LC_SEGMENT_64:
+#elif ARCH_BITS == 64
+            case LC_SEGMENT_32:
+#endif
+            case LC_ROUTINES_64:
+            case LC_ROUTINES:
+            case LC_THREAD:
+            case LC_LOADFVMLIB:
+            case LC_IDFVMLIB:
+            case LC_IDENT:
+            case LC_FVMFILE:
+            case LC_PREPAGE:
+            case LC_TWOLEVEL_HINTS:
+            case LC_PREBIND_CKSUM:
+            case LC_SEGMENT_SPLIT_INFO:
+            case LC_ENCRYPTION_INFO:
+                RETURN_VERR_LDR_UNEXPECTED;
+
+            /* no phones here yet */
+            case LC_VERSION_MIN_IPHONEOS:
+                RETURN_VERR_LDR_UNEXPECTED;
+
+            /* dylib */
+            case LC_LOAD_DYLIB:
+            case LC_ID_DYLIB:
+            case LC_LOAD_DYLINKER:
+            case LC_ID_DYLINKER:
+            case LC_PREBOUND_DYLIB:
+            case LC_LOAD_WEAK_DYLIB & ~LC_REQ_DYLD:
+            case LC_SUB_FRAMEWORK:
+            case LC_SUB_UMBRELLA:
+            case LC_SUB_CLIENT:
+            case LC_SUB_LIBRARY:
+            case LC_RPATH:
+            case LC_REEXPORT_DYLIB:
+            case LC_LAZY_LOAD_DYLIB:
+            case LC_DYLD_INFO:
+            case LC_DYLD_INFO_ONLY:
+            case LC_LOAD_UPWARD_DYLIB:
+            case LC_DYLD_ENVIRONMENT:
+            case LC_DYLIB_CODE_SIGN_DRS:
+                RETURN_VERR_LDR_UNEXPECTED;
+
+            default:
+                RETURN_VERR_BAD_EXE_FORMAT;
+        }
+
+        /* next */
+        pCmd = (load_command_t *)((uintptr_t)pCmd + pCmd->cmdsize);
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads the FAT and MACHO headers, noting down the relevant info.
+ *
+ * On success pThis->offArch and pThis->cbArch describe this architecture's
+ * slice of the file (both remain zero for a thin image), and
+ * pThis->cLoadCmds / pThis->cbLoadCmds are copied from the validated Mach-O
+ * header for the load-command pass that follows.
+ *
+ * @returns IPRT status code.
+ * @param   pThis               The internal scratch data.
+ */
+static int rtR0DbgKrnlDarwinLoadFileHeaders(RTDBGKRNLINFOINT *pThis)
+{
+    uint32_t i;
+
+    pThis->offArch = 0;
+    pThis->cbArch = 0;
+
+    /*
+     * Read the first bit of the file, parse the FAT if found there.
+     * Note: only the first 16 FAT entries are read and considered below.
+     */
+    int rc = RTFileReadAt(pThis->hFile, 0, pThis->abBuf, sizeof(fat_header_t) + sizeof(fat_arch_t) * 16, NULL);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    fat_header_t *pFat = (fat_header *)pThis->abBuf;
+    fat_arch_t *paFatArches = (fat_arch_t *)(pFat + 1);
+
+    /* Correct FAT endian first (the _OE signature indicates other-endian data). */
+    if (pFat->magic == IMAGE_FAT_SIGNATURE_OE)
+    {
+        pFat->magic = RT_BSWAP_U32(pFat->magic);
+        pFat->nfat_arch = RT_BSWAP_U32(pFat->nfat_arch);
+        i = RT_MIN(pFat->nfat_arch, 16);
+        while (i-- > 0)
+        {
+            paFatArches[i].cputype = RT_BSWAP_U32(paFatArches[i].cputype);
+            paFatArches[i].cpusubtype = RT_BSWAP_U32(paFatArches[i].cpusubtype);
+            paFatArches[i].offset = RT_BSWAP_U32(paFatArches[i].offset);
+            paFatArches[i].size = RT_BSWAP_U32(paFatArches[i].size);
+            paFatArches[i].align = RT_BSWAP_U32(paFatArches[i].align);
+        }
+    }
+
+    /* Lookup our architecture in the FAT. */
+    if (pFat->magic == IMAGE_FAT_SIGNATURE)
+    {
+        if (pFat->nfat_arch > 16)
+            RETURN_VERR_BAD_EXE_FORMAT;
+
+        for (i = 0; i < pFat->nfat_arch; i++)
+        {
+            if (   paFatArches[i].cputype == MY_CPU_TYPE
+                && paFatArches[i].cpusubtype == MY_CPU_SUBTYPE_ALL)
+            {
+                pThis->offArch = paFatArches[i].offset;
+                pThis->cbArch = paFatArches[i].size;
+                if (!pThis->cbArch)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                /* The slice must not overlap the FAT directory itself. */
+                if (pThis->offArch < sizeof(fat_header_t) + sizeof(fat_arch_t) * pFat->nfat_arch)
+                    RETURN_VERR_BAD_EXE_FORMAT;
+                /* Overflow check on offset + size. */
+                if (pThis->offArch + pThis->cbArch <= pThis->offArch)
+                    RETURN_VERR_LDR_ARCH_MISMATCH;
+                break;
+            }
+        }
+        if (i >= pFat->nfat_arch)
+            RETURN_VERR_LDR_ARCH_MISMATCH;
+    }
+
+    /*
+     * Read the Mach-O header and validate it.
+     */
+    rc = RTFileReadAt(pThis->hFile, pThis->offArch, pThis->abBuf, sizeof(MY_MACHO_HEADER), NULL);
+    if (RT_FAILURE(rc))
+        return rc;
+    MY_MACHO_HEADER const *pHdr = (MY_MACHO_HEADER const *)pThis->abBuf;
+    if (pHdr->magic != MY_MACHO_MAGIC)
+    {
+        /* Distinguish a valid Mach-O of the wrong bitness/endian from garbage. */
+        if (   pHdr->magic == IMAGE_MACHO32_SIGNATURE
+            || pHdr->magic == IMAGE_MACHO32_SIGNATURE_OE
+            || pHdr->magic == IMAGE_MACHO64_SIGNATURE
+            || pHdr->magic == IMAGE_MACHO64_SIGNATURE_OE)
+            RETURN_VERR_LDR_ARCH_MISMATCH;
+        RETURN_VERR_BAD_EXE_FORMAT;
+    }
+
+    if (pHdr->cputype != MY_CPU_TYPE)
+        RETURN_VERR_LDR_ARCH_MISMATCH;
+    if (pHdr->cpusubtype != MY_CPU_SUBTYPE_ALL)
+        RETURN_VERR_LDR_ARCH_MISMATCH;
+    if (pHdr->filetype != MH_EXECUTE)
+        RETURN_VERR_LDR_UNEXPECTED;
+    /* Sanity bounds on the load command table before trusting it. */
+    if (pHdr->ncmds < 4)
+        RETURN_VERR_LDR_UNEXPECTED;
+    if (pHdr->ncmds > 256)
+        RETURN_VERR_LDR_UNEXPECTED;
+    if (pHdr->sizeofcmds <= pHdr->ncmds * sizeof(load_command_t))
+        RETURN_VERR_LDR_UNEXPECTED;
+    if (pHdr->sizeofcmds >= _1M)
+        RETURN_VERR_LDR_UNEXPECTED;
+    if (pHdr->flags & ~MH_VALID_FLAGS)
+        RETURN_VERR_LDR_UNEXPECTED;
+
+    pThis->cLoadCmds = pHdr->ncmds;
+    pThis->cbLoadCmds = pHdr->sizeofcmds;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Destructor.
+ *
+ * Invalidates the magic, frees the string and symbol tables, and finally the
+ * instance itself.
+ *
+ * @param   pThis               The instance to destroy.
+ */
+static void rtR0DbgKrnlDarwinDtor(RTDBGKRNLINFOINT *pThis)
+{
+    pThis->u32Magic = ~RTDBGKRNLINFO_MAGIC; /* invalidate to catch use-after-free */
+
+    RTMemFree(pThis->pachStrTab);
+    pThis->pachStrTab = NULL;
+
+    RTMemFree(pThis->paSyms);
+    pThis->paSyms = NULL;
+
+    RTMemFree(pThis);
+}
+
+
+/**
+ * Worker for RTR0DbgKrnlInfoOpen: create an instance for one kernel file.
+ *
+ * Parses the file headers, load commands and symbol table; in ring-0 it also
+ * determines the kernel load displacement (10.8+ kernels are PIE).
+ *
+ * @returns IPRT status code.
+ * @param   phKrnlInfo          Where to store the handle on success.
+ * @param   pszKernelFile       Path of the kernel file to parse.
+ */
+static int rtR0DbgKrnlDarwinOpen(PRTDBGKRNLINFO phKrnlInfo, const char *pszKernelFile)
+{
+    RTDBGKRNLINFOINT *pThis = (RTDBGKRNLINFOINT *)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+    pThis->hFile = NIL_RTFILE;
+
+    int rc = RTFileOpen(&pThis->hFile, pszKernelFile, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE);
+    if (RT_SUCCESS(rc))
+        rc = rtR0DbgKrnlDarwinLoadFileHeaders(pThis);
+    if (RT_SUCCESS(rc))
+        rc = rtR0DbgKrnlDarwinLoadCommands(pThis);
+    if (RT_SUCCESS(rc))
+        rc = rtR0DbgKrnlDarwinLoadSymTab(pThis, pszKernelFile);
+    if (RT_SUCCESS(rc))
+    {
+#ifdef IN_RING0
+        /*
+         * Determine the load displacement (10.8 kernels are PIE).
+         */
+        uintptr_t uLinkAddr = rtR0DbgKrnlDarwinLookup(pThis, "kernel_map");
+        if (uLinkAddr != 0)
+            pThis->offLoad = (uintptr_t)&kernel_map - uLinkAddr;
+#endif
+        rc = rtR0DbgKrnlDarwinCheckStandardSymbols(pThis);
+    }
+
+    /* NOTE(review): rtR0DbgKrnlDarwinLoadDone presumably releases loading
+       scratch state (e.g. the file handle); defined outside this chunk. */
+    rtR0DbgKrnlDarwinLoadDone(pThis);
+    if (RT_SUCCESS(rc))
+    {
+        pThis->u32Magic = RTDBGKRNLINFO_MAGIC;
+        pThis->cRefs = 1;
+        *phKrnlInfo = pThis;
+    }
+    else
+        rtR0DbgKrnlDarwinDtor(pThis);
+    return rc;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoOpen(PRTDBGKRNLINFO phKrnlInfo, uint32_t fFlags)
+{
+    AssertPtrReturn(phKrnlInfo, VERR_INVALID_POINTER);
+    *phKrnlInfo = NIL_RTDBGKRNLINFO;
+    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+
+    /*
+     * Go thru likely kernel locations
+     *
+     * Note! Check the OS X version and reorder the list?
+     * Note! We should try fish kcsuffix out of bootargs or somewhere one day.
+     */
+    static bool s_fFirstCall = true; /* only log the chosen/failed kernels once */
+#ifdef IN_RING3
+    extern const char *g_pszTestKernel;
+#endif
+    struct
+    {
+        const char *pszLocation;
+        int rc; /* per-candidate status, kept for error-code selection below */
+    } aKernels[] =
+    {
+#ifdef IN_RING3
+        { g_pszTestKernel, VERR_WRONG_ORDER },
+#endif
+        { "/System/Library/Kernels/kernel", VERR_WRONG_ORDER },
+        { "/System/Library/Kernels/kernel.development", VERR_WRONG_ORDER },
+        { "/System/Library/Kernels/kernel.debug", VERR_WRONG_ORDER },
+        { "/mach_kernel", VERR_WRONG_ORDER },
+    };
+    int rc = VERR_WRONG_ORDER; /* shut up stupid MSC */
+    for (uint32_t i = 0; i < RT_ELEMENTS(aKernels); i++)
+    {
+        aKernels[i].rc = rc = rtR0DbgKrnlDarwinOpen(phKrnlInfo, aKernels[i].pszLocation);
+        if (RT_SUCCESS(rc))
+        {
+            if (s_fFirstCall)
+            {
+                printf("RTR0DbgKrnlInfoOpen: Using kernel file '%s'\n", aKernels[i].pszLocation);
+                s_fFirstCall = false;
+            }
+            return rc;
+        }
+    }
+
+    /*
+     * Failed.
+     */
+    /* Pick the best error code: prefer any failure more specific than 'file not found'. */
+    for (uint32_t i = 0; rc == VERR_FILE_NOT_FOUND && i < RT_ELEMENTS(aKernels); i++)
+        if (aKernels[i].rc != VERR_FILE_NOT_FOUND)
+            rc = aKernels[i].rc;
+
+    /* Bitch about it. */
+    printf("RTR0DbgKrnlInfoOpen: failed to find matching kernel file! rc=%d\n", rc);
+    if (s_fFirstCall)
+    {
+        for (uint32_t i = 0; i < RT_ELEMENTS(aKernels); i++)
+            printf("RTR0DbgKrnlInfoOpen: '%s' -> %d\n", aKernels[i].pszLocation, aKernels[i].rc);
+        s_fFirstCall = false;
+    }
+
+    return rc;
+}
+
+
+/** Retains a reference to the kernel info handle; returns the new count or
+ *  UINT32_MAX on an invalid handle. */
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRetain(RTDBGKRNLINFO hKrnlInfo)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, UINT32_MAX);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+
+    uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+    Assert(cRefs && cRefs < 100000); /* paranoia: catch wrap-around and stray handles */
+    return cRefs;
+}
+
+
+/** Releases a reference; destroys the instance when the count reaches zero.
+ *  NIL handles are tolerated and return 0. */
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRelease(RTDBGKRNLINFO hKrnlInfo)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    if (pThis == NIL_RTDBGKRNLINFO)
+        return 0;
+    AssertPtrReturn(pThis, UINT32_MAX);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+
+    uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+    if (cRefs == 0)
+        rtR0DbgKrnlDarwinDtor(pThis);
+    return cRefs;
+}
+
+
+/** Structure member queries are not implemented on Darwin: the inputs are
+ *  validated and VERR_NOT_FOUND is always returned. */
+RTR0DECL(int) RTR0DbgKrnlInfoQueryMember(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszStructure,
+ const char *pszMember, size_t *poffMember)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertPtrReturn(pszMember, VERR_INVALID_POINTER);
+    AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
+    AssertPtrReturn(pszStructure, VERR_INVALID_POINTER);
+    AssertPtrReturn(poffMember, VERR_INVALID_POINTER);
+    return VERR_NOT_FOUND;
+}
+
+
+/** Looks up a kernel symbol.  Only the kernel itself is supported, so
+ *  pszModule must be NULL.  *ppvSymbol is set even on failure (to NULL). */
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySymbol(RTDBGKRNLINFO hKrnlInfo, const char *pszModule,
+ const char *pszSymbol, void **ppvSymbol)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertPtrReturn(pszSymbol, VERR_INVALID_PARAMETER);
+    AssertPtrNullReturn(ppvSymbol, VERR_INVALID_PARAMETER);
+    AssertReturn(!pszModule, VERR_MODULE_NOT_FOUND);
+
+    uintptr_t uValue = rtR0DbgKrnlDarwinLookup(pThis, pszSymbol);
+    if (ppvSymbol)
+        *ppvSymbol = (void *)uValue;
+    if (uValue)
+        return VINF_SUCCESS;
+    return VERR_SYMBOL_NOT_FOUND;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/fileio-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/fileio-r0drv-darwin.cpp
new file mode 100644
index 00000000..e39ee408
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/fileio-r0drv-darwin.cpp
@@ -0,0 +1,311 @@
+/* $Id: fileio-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - File I/O, R0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2011-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+
+#include <iprt/file.h>
+#include "internal/iprt.h"
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Default file permissions for newly created files. */
+#if defined(S_IRUSR) && defined(S_IWUSR)
+# define RT_FILE_PERMISSION (S_IRUSR | S_IWUSR)
+#else
+# define RT_FILE_PERMISSION (00600)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Darwin kernel file handle data.
+ */
+typedef struct RTFILEINT
+{
+    /** Magic value (RTFILE_MAGIC). */
+    uint32_t u32Magic;
+    /** The open mode flags passed to the kernel API (FREAD/FWRITE/O_xxx). */
+    int fOpenMode;
+    /** The open flags passed to RTFileOpen. */
+    uint64_t fOpen;
+    /** The VFS context in which the file was opened. */
+    vfs_context_t hVfsCtx;
+    /** The vnode returned by vnode_open. */
+    vnode_t hVnode;
+    /** The current file offset (advanced by RTFileReadAt/RTFileSeek). */
+    uint64_t offFile;
+} RTFILEINT;
+/** Magic number for RTFILEINT::u32Magic (To Be Determined). */
+#define RTFILE_MAGIC UINT32_C(0x01020304)
+
+
+/**
+ * Opens a file via vnode_open in the current VFS context (ring-0, Darwin).
+ *
+ * @returns IPRT status code.
+ * @param   phFile              Where to store the handle on success.
+ * @param   pszFilename         Path of the file to open.
+ * @param   fOpen               RTFILE_O_XXX flags.
+ */
+RTDECL(int) RTFileOpen(PRTFILE phFile, const char *pszFilename, uint64_t fOpen)
+{
+    RTFILEINT *pThis = (RTFILEINT *)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    errno_t rc;
+    pThis->u32Magic = RTFILE_MAGIC;
+    pThis->fOpen = fOpen;
+    pThis->hVfsCtx = vfs_context_current();
+    if (pThis->hVfsCtx != NULL)
+    {
+        /* Creation mode: caller-specified bits or the module default. */
+        int fCMode = (fOpen & RTFILE_O_CREATE_MODE_MASK)
+                   ? (fOpen & RTFILE_O_CREATE_MODE_MASK) >> RTFILE_O_CREATE_MODE_SHIFT
+                   : RT_FILE_PERMISSION;
+        int fVnFlags = 0; /* VNODE_LOOKUP_XXX */
+        int fOpenMode = 0;
+        if (fOpen & RTFILE_O_NON_BLOCK)
+            fOpenMode |= O_NONBLOCK;
+        if (fOpen & RTFILE_O_WRITE_THROUGH)
+            fOpenMode |= O_SYNC;
+
+        /* create/truncate file */
+        switch (fOpen & RTFILE_O_ACTION_MASK)
+        {
+            case RTFILE_O_OPEN: break;
+            case RTFILE_O_OPEN_CREATE: fOpenMode |= O_CREAT; break;
+            case RTFILE_O_CREATE: fOpenMode |= O_CREAT | O_EXCL; break;
+            case RTFILE_O_CREATE_REPLACE: fOpenMode |= O_CREAT | O_TRUNC; break; /** @todo replacing needs fixing, this is *not* a 1:1 mapping! */
+        }
+        if (fOpen & RTFILE_O_TRUNCATE)
+            fOpenMode |= O_TRUNC;
+
+        /* Map the access mode onto the BSD FREAD/FWRITE flags. */
+        switch (fOpen & RTFILE_O_ACCESS_MASK)
+        {
+            case RTFILE_O_READ:
+                fOpenMode |= FREAD;
+                break;
+            case RTFILE_O_WRITE:
+                fOpenMode |= fOpen & RTFILE_O_APPEND ? O_APPEND | FWRITE : FWRITE;
+                break;
+            case RTFILE_O_READWRITE:
+                fOpenMode |= fOpen & RTFILE_O_APPEND ? O_APPEND | FWRITE | FREAD : FWRITE | FREAD;
+                break;
+            default:
+                AssertMsgFailed(("RTFileOpen received an invalid RW value, fOpen=%#x\n", fOpen));
+                IPRT_DARWIN_RESTORE_EFL_AC();
+                return VERR_INVALID_PARAMETER;
+        }
+
+        pThis->fOpenMode = fOpenMode;
+        rc = vnode_open(pszFilename, fOpenMode, fCMode, fVnFlags, &pThis->hVnode, pThis->hVfsCtx);
+        if (rc == 0)
+        {
+            *phFile = pThis;
+            IPRT_DARWIN_RESTORE_EFL_AC();
+            return VINF_SUCCESS;
+        }
+
+        rc = RTErrConvertFromErrno(rc);
+    }
+    else
+        rc = VERR_INTERNAL_ERROR_5; /* no current VFS context -- unexpected */
+    RTMemFree(pThis);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Closes the file handle; NIL handles are tolerated.
+ *
+ * @returns IPRT status code (vnode_close result converted from errno).
+ * @param   hFile               The file handle to close.
+ */
+RTDECL(int) RTFileClose(RTFILE hFile)
+{
+    if (hFile == NIL_RTFILE)
+        return VINF_SUCCESS;
+
+    RTFILEINT *pThis = hFile;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTFILE_MAGIC, VERR_INVALID_HANDLE);
+    pThis->u32Magic = ~RTFILE_MAGIC; /* invalidate before freeing */
+
+    IPRT_DARWIN_SAVE_EFL_AC();
+    errno_t rc = vnode_close(pThis->hVnode, pThis->fOpenMode & (FREAD | FWRITE), pThis->hVfsCtx);
+    IPRT_DARWIN_RESTORE_EFL_AC();
+
+    RTMemFree(pThis);
+    return RTErrConvertFromErrno(rc); /* errno 0 converts to VINF_SUCCESS */
+}
+
+
+/**
+ * Reads cbToRead bytes at the given file offset.
+ *
+ * @returns IPRT status code.  Without pcbRead, a short read yields
+ *          VERR_FILE_IO_ERROR.
+ * @param   hFile               The file handle.
+ * @param   off                 The offset to read at.
+ * @param   pvBuf               Destination buffer.
+ * @param   cbToRead            Number of bytes to read.
+ * @param   pcbRead             Where to return the actual byte count; optional.
+ */
+RTDECL(int) RTFileReadAt(RTFILE hFile, RTFOFF off, void *pvBuf, size_t cbToRead, size_t *pcbRead)
+{
+    RTFILEINT *pThis = hFile;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTFILE_MAGIC, VERR_INVALID_HANDLE);
+
+    off_t offNative = (off_t)off;
+    AssertReturn((RTFOFF)offNative == off, VERR_OUT_OF_RANGE);
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+#if 0 /* Added in 10.6, grr. */
+    errno_t rc;
+    if (!pcbRead)
+        rc = vn_rdwr(UIO_READ, pThis->hVnode, (char *)pvBuf, cbToRead, offNative, UIO_SYSSPACE, 0 /*ioflg*/,
+                     vfs_context_ucred(pThis->hVfsCtx), NULL, vfs_context_proc(pThis->hVfsCtx));
+    else
+    {
+        int cbLeft = 0;
+        rc = vn_rdwr(UIO_READ, pThis->hVnode, (char *)pvBuf, cbToRead, offNative, UIO_SYSSPACE, 0 /*ioflg*/,
+                     vfs_context_ucred(pThis->hVfsCtx), &cbLeft, vfs_context_proc(pThis->hVfsCtx));
+        *pcbRead = cbToRead - cbLeft;
+    }
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return !rc ? VINF_SUCCESS : RTErrConvertFromErrno(rc);
+
+#else
+    /* Portable path: build a single-segment uio and hand it to VNOP_READ. */
+    uio_t hUio = uio_create(1, offNative, UIO_SYSSPACE, UIO_READ);
+    if (!hUio)
+    {
+        IPRT_DARWIN_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+    errno_t rc;
+    if (uio_addiov(hUio, (user_addr_t)(uintptr_t)pvBuf, cbToRead) == 0)
+    {
+        rc = VNOP_READ(pThis->hVnode, hUio, 0 /*ioflg*/, pThis->hVfsCtx);
+        off_t const cbActual = cbToRead - uio_resid(hUio); /* resid = bytes not transferred */
+        if (pcbRead)
+            *pcbRead = cbActual;
+        if (rc == 0)
+        {
+            /* NOTE(review): the current offset is advanced even on positional
+               reads, so RTFileRead's position moves after RTFileReadAt too --
+               confirm this is the intended semantic. */
+            pThis->offFile += (uint64_t)cbActual;
+            if (cbToRead != (uint64_t)cbActual)
+                rc = VERR_FILE_IO_ERROR;
+        }
+        else
+            rc = RTErrConvertFromErrno(rc);
+    }
+    else
+        rc = VERR_INTERNAL_ERROR_3;
+    uio_free(hUio);
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+#endif
+}
+
+
+/**
+ * Sequential read from the current file offset; delegates to RTFileReadAt,
+ * which also advances the offset.
+ */
+RTDECL(int) RTFileRead(RTFILE hFile, void *pvBuf, size_t cbToRead, size_t *pcbRead)
+{
+    RTFILEINT *pThis = hFile;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTFILE_MAGIC, VERR_INVALID_HANDLE);
+
+    return RTFileReadAt(hFile, pThis->offFile, pvBuf, cbToRead, pcbRead);
+}
+
+
+/**
+ * Queries the file size via the va_data_size vnode attribute.
+ *
+ * NOTE(review): unlike the other methods in this file, this one does not
+ * bracket the kernel call with IPRT_DARWIN_SAVE/RESTORE_EFL_AC -- confirm
+ * whether that is intentional.
+ *
+ * @returns IPRT status code.
+ * @param   hFile               The file handle.
+ * @param   pcbSize             Where to return the size in bytes.
+ */
+RTDECL(int) RTFileGetSize(RTFILE hFile, uint64_t *pcbSize)
+{
+    RTFILEINT *pThis = hFile;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTFILE_MAGIC, VERR_INVALID_HANDLE);
+
+    /*
+     * Query the data size attribute.
+     * Note! Allocate extra attribute buffer space to be on the safe side.
+     */
+    union
+    {
+        struct vnode_attr VAttr;
+        uint8_t abPadding[sizeof(struct vnode_attr) * 2];
+    } uBuf;
+    RT_ZERO(uBuf);
+    struct vnode_attr *pVAttr = &uBuf.VAttr;
+
+    VATTR_INIT(pVAttr);
+    VATTR_WANTED(pVAttr, va_data_size);
+
+    errno_t rc = vnode_getattr(pThis->hVnode, pVAttr, pThis->hVfsCtx);
+    if (!rc)
+    {
+        *pcbSize = pVAttr->va_data_size;
+        return VINF_SUCCESS;
+    }
+    return RTErrConvertFromErrno(rc);
+}
+
+
+/**
+ * Adjusts the current file offset; only bookkeeping, no kernel call except
+ * for RTFILE_SEEK_END which queries the file size.
+ *
+ * @returns IPRT status code.
+ * @param   hFile               The file handle.
+ * @param   offSeek             Signed seek delta/position per uMethod.
+ * @param   uMethod             RTFILE_SEEK_BEGIN/CURRENT/END.
+ * @param   poffActual          Where to return the new offset; optional.
+ */
+RTDECL(int) RTFileSeek(RTFILE hFile, int64_t offSeek, unsigned uMethod, uint64_t *poffActual)
+{
+    RTFILEINT *pThis = hFile;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTFILE_MAGIC, VERR_INVALID_HANDLE);
+
+    uint64_t offNew;
+    switch (uMethod)
+    {
+        case RTFILE_SEEK_BEGIN:
+            AssertReturn(offSeek >= 0, VERR_NEGATIVE_SEEK);
+            offNew = offSeek;
+            break;
+
+        case RTFILE_SEEK_CURRENT:
+            offNew = pThis->offFile + offSeek;
+            break;
+
+        case RTFILE_SEEK_END:
+        {
+            uint64_t cbFile = 0;
+            int rc = RTFileGetSize(hFile, &cbFile);
+            if (RT_SUCCESS(rc))
+                offNew = cbFile + offSeek;
+            else
+                return rc;
+            break;
+        }
+
+        default:
+            return VERR_INVALID_PARAMETER;
+    }
+
+    /* Reject results that went negative (viewed as signed RTFOFF). */
+    if ((RTFOFF)offNew >= 0)
+    {
+        pThis->offFile = offNew;
+        if (poffActual)
+            *poffActual = offNew;
+        return VINF_SUCCESS;
+    }
+    return VERR_NEGATIVE_SEEK;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/initterm-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/initterm-r0drv-darwin.cpp
new file mode 100644
index 00000000..712b8e6b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/initterm-r0drv-darwin.cpp
@@ -0,0 +1,115 @@
+/* $Id: initterm-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Initialization & Termination, R0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+#include <iprt/dbg.h>
+#include "internal/initterm.h"
+
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Pointer to the lock group used by IPRT. */
+lck_grp_t *g_pDarwinLockGroup = NULL;
+/** Pointer to the ast_pending function, if found (resolved in rtR0InitNative). */
+PFNR0DARWINASTPENDING g_pfnR0DarwinAstPending = NULL;
+/** Pointer to the cpu_interrupt function, if found (resolved in rtR0InitNative). */
+PFNR0DARWINCPUINTERRUPT g_pfnR0DarwinCpuInterrupt = NULL;
+
+
+/**
+ * Darwin-specific IPRT ring-0 initialization.
+ *
+ * Creates the IPRT lock group, sets up the preemption hacks, and makes a
+ * best effort at resolving unexported kernel symbols (failure to resolve
+ * them is deliberately not fatal).
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Create the lock group.
+     */
+    g_pDarwinLockGroup = lck_grp_alloc_init("IPRT", LCK_GRP_ATTR_NULL);
+    AssertReturn(g_pDarwinLockGroup, VERR_NO_MEMORY);
+
+    /*
+     * Initialize the preemption hacks.
+     */
+    int rc = rtThreadPreemptDarwinInit();
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Try resolve kernel symbols we need but apple don't wish to give us.
+         */
+        RTDBGKRNLINFO hKrnlInfo;
+        rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0 /*fFlags*/);
+        if (RT_SUCCESS(rc))
+        {
+            /* Lookup failures leave the function pointers NULL; callers must check. */
+            RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "ast_pending", (void **)&g_pfnR0DarwinAstPending);
+            printf("ast_pending=%p\n", g_pfnR0DarwinAstPending);
+            RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "cpu_interrupt", (void **)&g_pfnR0DarwinCpuInterrupt);
+            printf("cpu_interrupt=%p\n", g_pfnR0DarwinCpuInterrupt);
+            RTR0DbgKrnlInfoRelease(hKrnlInfo);
+        }
+        if (RT_FAILURE(rc))
+        {
+            /* Non-fatal: continue without the special symbols. */
+            printf("rtR0InitNative: warning! failed to resolve special kernel symbols\n");
+            rc = VINF_SUCCESS;
+        }
+    }
+    if (RT_FAILURE(rc))
+        rtR0TermNative();
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Darwin-specific IPRT ring-0 termination; undoes rtR0InitNative in reverse
+ * order and is safe to call on partial initialization.
+ */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Preemption hacks before the lock group.
+     */
+    rtThreadPreemptDarwinTerm();
+
+    /*
+     * Free the lock group.
+     */
+    if (g_pDarwinLockGroup)
+    {
+        lck_grp_free(g_pDarwinLockGroup);
+        g_pDarwinLockGroup = NULL;
+    }
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
new file mode 100644
index 00000000..ea3b5839
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
@@ -0,0 +1,1244 @@
+/* $Id: memobj-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/memobj.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/x86.h>
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include "internal/memobj.h"
+
+/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The Darwin version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJDARWIN
+{
+    /** The core structure. */
+    RTR0MEMOBJINTERNAL Core;
+    /** Pointer to the memory descriptor created for allocated and locked memory.
+     * NULL when not used; released and completed in rtR0MemObjNativeFree. */
+    IOMemoryDescriptor *pMemDesc;
+    /** Pointer to the memory mapping object for mapped memory.
+     * NULL when not used; released in rtR0MemObjNativeFree. */
+    IOMemoryMap *pMemMap;
+} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
+
+
+/**
+ * Touch the pages to force the kernel to create or write-enable the page table
+ * entries.
+ *
+ * This is necessary since the kernel gets upset if we take a page fault when
+ * preemption is disabled and/or we own a simple lock (same thing). It has no
+ * problems with us disabling interrupts when taking the traps, weird stuff.
+ *
+ * (This is basically a way of invoking vm_fault on a range of pages.)
+ *
+ * @param   pv                  Pointer to the first page.
+ * @param   cb                  The number of bytes.
+ */
+static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
+{
+    uint32_t volatile *pu32 = (uint32_t volatile *)pv;
+    for (;;)
+    {
+        /* No-op compare-exchange: forces a write access without changing data. */
+        ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
+        if (cb <= PAGE_SIZE)
+            break;
+        cb -= PAGE_SIZE;
+        pu32 += PAGE_SIZE / sizeof(uint32_t);
+    }
+}
+
+
+/**
+ * Read (sniff) every page in the range to make sure there are some page tables
+ * entries backing it.
+ *
+ * This is just to be sure vm_protect didn't remove stuff without re-adding it
+ * if someone should try write-protect something.
+ *
+ * @param   pv                  Pointer to the first page.
+ * @param   cb                  The number of bytes.
+ */
+static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
+{
+    uint32_t volatile *pu32 = (uint32_t volatile *)pv;
+    uint32_t volatile u32Counter = 0; /* volatile so the reads aren't optimized away */
+    for (;;)
+    {
+        u32Counter += *pu32;
+
+        if (cb <= PAGE_SIZE)
+            break;
+        cb -= PAGE_SIZE;
+        pu32 += PAGE_SIZE / sizeof(uint32_t);
+    }
+}
+
+
+/**
+ * Gets the virtual memory map the specified object is mapped into.
+ *
+ * @returns VM map handle on success, NULL if no map.
+ * @param   pMem                The memory object.
+ */
+DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
+{
+    switch (pMem->enmType)
+    {
+        /* Kernel allocations always live in the kernel map. */
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+            return kernel_map;
+
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            return NULL; /* pretend these have no mapping atm. */
+
+        /* For the remaining types, select kernel or user task map by owner. */
+        case RTR0MEMOBJTYPE_LOCK:
+            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : get_task_map((task_t)pMem->u.Lock.R0Process);
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
+
+        case RTR0MEMOBJTYPE_MAPPING:
+            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : get_task_map((task_t)pMem->u.Mapping.R0Process);
+
+        default:
+            return NULL;
+    }
+}
+
+#if 0 /* not necessary after all*/
+/* My vm_map mockup.
+   (Dead code: the enclosing #if 0 keeps this out of the build.) */
+struct my_vm_map
+{
+    struct { char pad[8]; } lock;
+    struct my_vm_map_header
+    {
+        struct vm_map_links
+        {
+            void *prev;
+            void *next;
+            vm_map_offset_t start;
+            vm_map_offset_t end;
+        } links;
+        int nentries;
+        boolean_t entries_pageable;
+    } hdr;
+    pmap_t pmap;
+    vm_map_size_t size;
+};
+
+
+/**
+ * Gets the minimum map address, this is similar to get_map_min.
+ *
+ * @returns The start address of the map.
+ * @param   pMap                The map.
+ */
+static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
+{
+    /* lazy discovery of the correct offset. The apple guys is a wonderfully secretive bunch. */
+    static int32_t volatile s_offAdjust = INT32_MAX; /* INT32_MAX == not yet discovered */
+    int32_t off = s_offAdjust;
+    if (off == INT32_MAX)
+    {
+        /* Scan the kernel_map for the known kernel_pmap pointer to find the pmap field offset. */
+        for (off = 0; ; off += sizeof(pmap_t))
+        {
+            if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
+                break;
+            AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
+        }
+        ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
+    }
+
+    /* calculate it. */
+    struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
+    return pMyMap->hdr.links.start;
+}
+#endif /* unused */
+
+#ifdef RT_STRICT
+# if 0 /* unused */
+
+/**
+ * Read from a physical page.
+ * (Dead code: compiled out by the enclosing RT_STRICT / #if 0 guards.)
+ *
+ * @param   HCPhys              The address to start reading at.
+ * @param   cb                  How many bytes to read.
+ * @param   pvDst               Where to put the bytes. This is zero'd on failure.
+ */
+static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
+{
+    memset(pvDst, '\0', cb); /* pre-zero so failure paths can just return */
+
+    IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
+    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
+                                                                         kIODirectionIn, NULL /*task*/);
+    if (pMemDesc)
+    {
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
+        IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
+#else
+        IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
+#endif
+        if (pMemMap)
+        {
+            void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
+            memcpy(pvDst, pvSrc, cb);
+            pMemMap->release();
+        }
+        else
+            printf("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
+
+        pMemDesc->release();
+    }
+    else
+        printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
+}
+
+
+/**
+ * Gets the PTE for a page.
+ * (Dead code: compiled out by the enclosing RT_STRICT / #if 0 guards.)
+ *
+ * Walks the paging structures by reading them via rtR0MemObjDarwinReadPhys,
+ * handling long mode (4-level), PAE and legacy 32-bit layouts in turn.
+ *
+ * @returns the PTE.
+ * @param   pvPage              The virtual address to get the PTE for.
+ */
+static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
+{
+    RTUINT64U u64;
+    RTCCUINTREG cr3 = ASMGetCR3();
+    RTCCUINTREG cr4 = ASMGetCR4();
+    bool fPAE = false;
+    bool fLMA = false;
+    if (cr4 & X86_CR4_PAE)
+    {
+        fPAE = true;
+        uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
+        if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
+        {
+            uint64_t efer = ASMRdMsr(MSR_K6_EFER);
+            if (efer & MSR_K6_EFER_LMA)
+                fLMA = true;
+        }
+    }
+
+    if (fLMA)
+    {
+        /* PML4 */
+        rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
+        if (!(u64.u & X86_PML4E_P))
+        {
+            printf("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
+            return 0;
+        }
+
+        /* PDPTR */
+        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
+        if (!(u64.u & X86_PDPE_P))
+        {
+            printf("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
+            return 0;
+        }
+        if (u64.u & X86_PDPE_LM_PS) /* 1GB page: synthesize the result */
+            return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
+
+        /* PD */
+        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
+        if (!(u64.u & X86_PDE_P))
+        {
+            printf("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
+            return 0;
+        }
+        if (u64.u & X86_PDE_PS) /* 2MB page */
+            return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
+
+        /* PT */
+        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
+        if (!(u64.u & X86_PTE_P))
+        {
+            printf("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
+            return 0;
+        }
+        return u64.u;
+    }
+
+    if (fPAE)
+    {
+        /* PDPTR.  NOTE(review): reads via u64.u which is uninitialized here;
+           the real CR3 value would be expected -- looks like a latent bug in
+           this disabled code. */
+        rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
+        if (!(u64.u & X86_PDE_P))
+            return 0;
+
+        /* PD */
+        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
+        if (!(u64.u & X86_PDE_P))
+            return 0;
+        if (u64.u & X86_PDE_PS)
+            return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
+
+        /* PT */
+        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
+        if (!(u64.u & X86_PTE_P))
+            return 0;
+        return u64.u;
+    }
+
+    /* PD */
+    rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
+    if (!(u64.au32[0] & X86_PDE_P))
+        return 0;
+    if (u64.au32[0] & X86_PDE_PS)
+        return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
+
+    /* PT */
+    rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
+    if (!(u64.au32[0] & X86_PTE_P))
+        return 0;
+    return u64.au32[0];
+
+    return 0; /* not reached */
+}
+
+# endif /* unused */
+#endif /* RT_STRICT */
+
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+ PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ /*
+ * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
+ */
+ if (pMemDarwin->pMemDesc)
+ {
+ pMemDarwin->pMemDesc->complete();
+ pMemDarwin->pMemDesc->release();
+ pMemDarwin->pMemDesc = NULL;
+ }
+
+ if (pMemDarwin->pMemMap)
+ {
+ pMemDarwin->pMemMap->release();
+ pMemDarwin->pMemMap = NULL;
+ }
+
+ /*
+ * Release any memory that we've allocated or locked.
+ */
+ switch (pMemDarwin->Core.enmType)
+ {
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_CONT:
+ break;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ {
+#ifdef USE_VM_MAP_WIRE
+ vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
+ ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
+ : kernel_map;
+ kern_return_t kr = vm_map_unwire(Map,
+ (vm_map_offset_t)pMemDarwin->Core.pv,
+ (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
+ 0 /* not user */);
+ AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
+#endif
+ break;
+ }
+
+ case RTR0MEMOBJTYPE_PHYS:
+ /*if (pMemDarwin->Core.u.Phys.fAllocated)
+ IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
+ Assert(!pMemDarwin->Core.u.Phys.fAllocated);
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VERR_INTERNAL_ERROR;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VERR_INTERNAL_ERROR;
+
+ case RTR0MEMOBJTYPE_MAPPING:
+ /* nothing to do here. */
+ break;
+
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VERR_INTERNAL_ERROR;
+ }
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
+ *
+ * @returns IPRT status code.
+ * @retval VERR_ADDRESS_TOO_BIG try another way.
+ *
+ * @param ppMem Where to return the memory object.
+ * @param cb The page aligned memory size.
+ * @param fExecutable Whether the mapping needs to be executable.
+ * @param fContiguous Whether the backing memory needs to be contiguous.
+ * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
+ * you don't care that much or is speculating.
+ * @param MaxPhysAddr The max address to verify the result against. Use
+ * UINT64_MAX if it doesn't matter.
+ * @param enmType The object type.
+ */
+static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
+ bool fExecutable, bool fContiguous,
+ mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
+ RTR0MEMOBJTYPE enmType)
+{
+ /*
+ * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
+ * actually respects the physical memory mask (10.5.x is certainly busted),
+ * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
+ *
+ * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
+ *
+ * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
+ */
+ int rc;
+ size_t cbFudged = cb;
+ if (1) /** @todo Figure out why this is broken. Is it only on snow leopard? Seen allocating memory for the VM structure, last page corrupted or inaccessible. */
+ cbFudged += PAGE_SIZE;
+#if 1
+ IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
+ if (fContiguous)
+ fOptions |= kIOMemoryPhysicallyContiguous;
+ if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
+ fOptions |= kIOMemoryMapperNone;
+ IOBufferMemoryDescriptor *pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions,
+ cbFudged, PhysMask);
+#else /* Requires 10.7 SDK, but allows alignment to be specified: */
+ uint64_t uAlignment = PAGE_SIZE;
+ IOOptionBits fOptions = kIODirectionInOut | kIOMemoryMapperNone;
+ if (fContiguous || MaxPhysAddr < UINT64_MAX)
+ {
+ fOptions |= kIOMemoryPhysicallyContiguous;
+ uAlignment = 1; /* PhysMask isn't respected if higher. */
+ }
+
+ IOBufferMemoryDescriptor *pMemDesc = new IOBufferMemoryDescriptor;
+ if (pMemDesc && !pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignment, PhysMask))
+ {
+ pMemDesc->release();
+ pMemDesc = NULL;
+ }
+#endif
+ if (pMemDesc)
+ {
+ IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
+ if (IORet == kIOReturnSuccess)
+ {
+ void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
+ if (pv)
+ {
+ /*
+ * Check if it's all below 4GB.
+ */
+ addr64_t AddrPrev = 0;
+ MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
+ for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
+ {
+#ifdef __LP64__
+ addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
+#else
+ addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
+#endif
+ if ( Addr > MaxPhysAddr
+ || !Addr
+ || (Addr & PAGE_OFFSET_MASK)
+ || ( fContiguous
+ && !off
+ && Addr == AddrPrev + PAGE_SIZE))
+ {
+ /* Buggy API, try allocate the memory another way. */
+ pMemDesc->complete();
+ pMemDesc->release();
+ if (PhysMask)
+ LogRel(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
+ off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
+ return VERR_ADDRESS_TOO_BIG;
+ }
+ AddrPrev = Addr;
+ }
+
+#ifdef RT_STRICT
+ /* check that the memory is actually mapped. */
+ //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
+ //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
+ RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RTThreadPreemptDisable(&State);
+ rtR0MemObjDarwinTouchPages(pv, cb);
+ RTThreadPreemptRestore(&State);
+#endif
+
+ /*
+ * Create the IPRT memory object.
+ */
+ PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
+ if (pMemDarwin)
+ {
+ if (fContiguous)
+ {
+#ifdef __LP64__
+ addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
+#else
+ addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
+#endif
+ RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
+ if (enmType == RTR0MEMOBJTYPE_CONT)
+ pMemDarwin->Core.u.Cont.Phys = PhysBase;
+ else if (enmType == RTR0MEMOBJTYPE_PHYS)
+ pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
+ else
+ AssertMsgFailed(("enmType=%d\n", enmType));
+ }
+
+#if 1 /* Experimental code. */
+ if (fExecutable)
+ {
+ rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
+# ifdef RT_STRICT
+ /* check that the memory is actually mapped. */
+ RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RTThreadPreemptDisable(&State2);
+ rtR0MemObjDarwinTouchPages(pv, cb);
+ RTThreadPreemptRestore(&State2);
+# endif
+
+ /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
+ if ( rc == VERR_PERMISSION_DENIED
+ && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
+ rc = VINF_SUCCESS;
+ }
+ else
+#endif
+ rc = VINF_SUCCESS;
+ if (RT_SUCCESS(rc))
+ {
+ pMemDarwin->pMemDesc = pMemDesc;
+ *ppMem = &pMemDarwin->Core;
+ return VINF_SUCCESS;
+ }
+
+ rtR0MemObjDelete(&pMemDarwin->Core);
+ }
+
+ if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
+ rc = VERR_NO_PHYS_MEMORY;
+ else if (enmType == RTR0MEMOBJTYPE_LOW)
+ rc = VERR_NO_LOW_MEMORY;
+ else if (enmType == RTR0MEMOBJTYPE_CONT)
+ rc = VERR_NO_CONT_MEMORY;
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_MEMOBJ_INIT_FAILED;
+
+ pMemDesc->complete();
+ }
+ else
+ rc = RTErrConvertFromDarwinIO(IORet);
+ pMemDesc->release();
+ }
+ else
+ rc = VERR_MEMOBJ_INIT_FAILED;
+ Assert(rc != VERR_ADDRESS_TOO_BIG);
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
+ 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ /*
+ * Try IOMallocPhysical/IOMallocAligned first.
+ * Then try optimistically without a physical address mask, which will always
+ * end up using IOMallocAligned.
+ *
+ * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
+ */
+ int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
+ ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
+ if (rc == VERR_ADDRESS_TOO_BIG)
+ rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
+ 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
+ ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
+ RTR0MEMOBJTYPE_CONT);
+
+ /*
+ * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
+ * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
+ */
+ if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
+ rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
+ ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
+ RTR0MEMOBJTYPE_CONT);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+ /** @todo alignment */
+ if (uAlignment != PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ /*
+ * Translate the PhysHighest address into a mask.
+ */
+ int rc;
+ if (PhysHighest == NIL_RTHCPHYS)
+ rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
+ 0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
+ else
+ {
+ mach_vm_address_t PhysMask = 0;
+ PhysMask = ~(mach_vm_address_t)0;
+ while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
+ PhysMask >>= 1;
+ AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
+ PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
+
+ rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
+ PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
+ }
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+ /** @todo rtR0MemObjNativeAllocPhys / darwin.
+ * This might be a bit problematic and may very well require having to create our own
+ * object which we populate with pages but without mapping it into any address space.
+ * Estimate is 2-3 days.
+ */
+ RT_NOREF(ppMem, cb, PhysHighest);
+ return VERR_NOT_SUPPORTED;
+}
+
+
/**
 * Wraps a pre-existing physical address range (e.g. MMIO) in an IPRT memory
 * object without allocating or mapping anything; u.Phys.fAllocated is false.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to return the new memory object.
 * @param   Phys            Base physical address of the range.
 * @param   cb              Size of the range in bytes.
 * @param   uCachePolicy    Must be RTMEM_CACHE_POLICY_DONT_CARE (only policy
 *                          supported here).
 */
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Create a descriptor for it (the validation is always true on intel macs, but
     * as it doesn't harm us keep it in).
     */
    int rc = VERR_ADDRESS_TOO_BIG;
    IOAddressRange aRanges[1] = { { Phys, cb } };
    /* Round-trip check: verifies Phys/cb were not truncated when stored into
       the IOAddressRange fields. */
    if (   aRanges[0].address == Phys
        && aRanges[0].length == cb)
    {
        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                             kIODirectionInOut, NULL /*task*/);
        if (pMemDesc)
        {
#ifdef __LP64__
            Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
#else
            Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Phys.PhysBase = Phys;
                pMemDarwin->Core.u.Phys.fAllocated = false;
                pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                IPRT_DARWIN_RESTORE_EFL_AC();
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            pMemDesc->release();
        }
        else
            rc = VERR_MEMOBJ_INIT_FAILED;
    }
    else
        AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
    IPRT_DARWIN_RESTORE_EFL_AC();
    return rc;
}
+
+
+/**
+ * Internal worker for locking down pages.
+ *
+ * @return IPRT status code.
+ *
+ * @param ppMem Where to store the memory object pointer.
+ * @param pv First page.
+ * @param cb Number of bytes.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
+ * @param Task The task \a pv and \a cb refers to.
+ */
+static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+ NOREF(fAccess);
+#ifdef USE_VM_MAP_WIRE
+ vm_map_t Map = get_task_map(Task);
+ Assert(Map);
+
+ /*
+ * First try lock the memory.
+ */
+ int rc = VERR_LOCK_FAILED;
+ kern_return_t kr = vm_map_wire(get_task_map(Task),
+ (vm_map_offset_t)pv,
+ (vm_map_offset_t)pv + cb,
+ VM_PROT_DEFAULT,
+ 0 /* not user */);
+ if (kr == KERN_SUCCESS)
+ {
+ /*
+ * Create the IPRT memory object.
+ */
+ PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
+ if (pMemDarwin)
+ {
+ pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
+ *ppMem = &pMemDarwin->Core;
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+
+ kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
+ Assert(kr == KERN_SUCCESS);
+ rc = VERR_NO_MEMORY;
+ }
+
+#else
+
+ /*
+ * Create a descriptor and try lock it (prepare).
+ */
+ int rc = VERR_MEMOBJ_INIT_FAILED;
+ IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
+ if (pMemDesc)
+ {
+ IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
+ if (IORet == kIOReturnSuccess)
+ {
+ /*
+ * Create the IPRT memory object.
+ */
+ PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
+ if (pMemDarwin)
+ {
+ pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
+ pMemDarwin->pMemDesc = pMemDesc;
+ *ppMem = &pMemDarwin->Core;
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+
+ pMemDesc->complete();
+ rc = VERR_NO_MEMORY;
+ }
+ else
+ rc = VERR_LOCK_FAILED;
+ pMemDesc->release();
+ }
+#endif
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+ return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+ return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+ RT_NOREF(ppMem, pvFixed, cb, uAlignment);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
+{
+ RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
+ return VERR_NOT_SUPPORTED;
+}
+
+
/**
 * Maps (a sub-range of) an existing memory object into the kernel address space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the new mapping object.
 * @param   pMemToMap   The object to map; must carry an IOMemoryDescriptor.
 * @param   pvFixed     Must be (void *)-1; fixed-address mapping is unsupported.
 * @param   uAlignment  Requested alignment; anything above PAGE_SIZE is unsupported.
 * @param   fProt       Requested protection (currently ignored here).
 * @param   offSub      Offset of the sub-range to map.
 * @param   cbSub       Size of the sub-range to map.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    RT_NOREF(fProt);
    AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Must have a memory descriptor that we can map.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
        /* createMappingInTask replaced the deprecated map() in the 10.5 SDK. */
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              offSub,
                                                                              cbSub);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
                                                              0,
                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                              offSub,
                                                              cbSub);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            /* Guard against truncation when IOVirtualAddress is wider than a pointer. */
            if ((uintptr_t)pv == VirtAddr)
            {
                //addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
                //printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);

//                /*
//                 * Explicitly lock it so that we're sure it is present and that
//                 * its PTEs cannot be recycled.
//                 * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
//                 *       to the options which causes prepare() to not wire the pages.
//                 *       This is probably a bug.
//                 */
//                IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
//                IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
//                                                                              1 /* count */,
//                                                                              0 /* offset */,
//                                                                              kernel_task,
//                                                                              kIODirectionInOut | kIOMemoryTypeVirtual,
//                                                                              kIOMapperSystem);
//                if (pMemDesc)
//                {
//                    IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
//                    if (IORet == kIOReturnSuccess)
//                    {
                        /* HACK ALERT! Fault the pages in by touching them, since the
                           disabled wiring code above doesn't work (see todo below). */
                        rtR0MemObjDarwinTouchPages(pv, cbSub);
                        /** @todo First, the memory should've been mapped by now, and second, it
                         *        should have the wired attribute in the PTE (bit 9). Neither
                         *        seems to be the case. The disabled locking code doesn't make any
                         *        difference, which is extremely odd, and breaks
                         *        rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64 for the
                         *        lock descriptor. */
                        //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
                        //printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2);

                        /*
                         * Create the IPRT memory object.
                         */
                        PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                        pv, cbSub);
                        if (pMemDarwin)
                        {
                            pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
                            pMemDarwin->pMemMap = pMemMap;
//                            pMemDarwin->pMemDesc = pMemDesc;
                            *ppMem = &pMemDarwin->Core;

                            IPRT_DARWIN_RESTORE_EFL_AC();
                            return VINF_SUCCESS;
                        }

//                    pMemDesc->complete();
//                    rc = VERR_NO_MEMORY;
//                }
//                else
//                    rc = RTErrConvertFromDarwinIO(IORet);
//                pMemDesc->release();
//            }
//            else
//                rc = VERR_MEMOBJ_INIT_FAILED;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
    return rc;
}
+
+
/**
 * Maps an existing memory object into a user process' address space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to return the new mapping object.
 * @param   pMemToMap   The object to map; must carry an IOMemoryDescriptor.
 * @param   R3PtrFixed  Must be (RTR3PTR)-1; fixed-address mapping is unsupported.
 * @param   uAlignment  Requested alignment; anything above PAGE_SIZE is unsupported.
 * @param   fProt       Requested protection (currently ignored here).
 * @param   R0Process   The target process; cast to its mach task below.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    RT_NOREF(fProt);

    /*
     * Check for unsupported things.
     */
    AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Must have a memory descriptor.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
        /* Offset/length of 0 maps the entire descriptor. */
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              0 /* offset */,
                                                                              0 /* length */);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
                                                              0,
                                                              kIOMapAnywhere | kIOMapDefaultCache);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            /* Guard against truncation when IOVirtualAddress is wider than a pointer. */
            if ((uintptr_t)pv == VirtAddr)
            {
                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = R0Process;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;

                    IPRT_DARWIN_RESTORE_EFL_AC();
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
    return rc;
}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ /* Get the map for the object. */
+ vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
+ if (!pVmMap)
+ {
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VERR_NOT_SUPPORTED;
+ }
+
+ /*
+ * Convert the protection.
+ */
+ vm_prot_t fMachProt;
+ switch (fProt)
+ {
+ case RTMEM_PROT_NONE:
+ fMachProt = VM_PROT_NONE;
+ break;
+ case RTMEM_PROT_READ:
+ fMachProt = VM_PROT_READ;
+ break;
+ case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
+ fMachProt = VM_PROT_READ | VM_PROT_WRITE;
+ break;
+ case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
+ fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ break;
+ case RTMEM_PROT_WRITE:
+ fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
+ break;
+ case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
+ fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
+ break;
+ case RTMEM_PROT_EXEC:
+ fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
+ break;
+ default:
+ AssertFailedReturn(VERR_INVALID_PARAMETER);
+ }
+
+ /*
+ * Do the job.
+ */
+ vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
+ kern_return_t krc = vm_protect(pVmMap,
+ Start,
+ cbSub,
+ false,
+ fMachProt);
+ if (krc != KERN_SUCCESS)
+ {
+ static int s_cComplaints = 0;
+ if (s_cComplaints < 10)
+ {
+ s_cComplaints++;
+ printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
+ pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
+
+ kern_return_t krc2;
+ vm_offset_t pvReal = Start;
+ vm_size_t cbReal = 0;
+ mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
+ struct vm_region_basic_info Info;
+ RT_ZERO(Info);
+ krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
+ printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
+ krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
+ Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
+ }
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return RTErrConvertFromDarwinKern(krc);
+ }
+
+ /*
+ * Touch the pages if they should be writable afterwards and accessible
+ * from code which should never fault. vm_protect() may leave pages
+ * temporarily write protected, possibly due to pmap no-upgrade rules?
+ *
+ * This is the same trick (or HACK ALERT if you like) as applied in
+ * rtR0MemObjNativeMapKernel.
+ */
+ if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
+ || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
+ {
+ if (fProt & RTMEM_PROT_WRITE)
+ rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
+ /*
+ * Sniff (read) read-only pages too, just to be sure.
+ */
+ else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
+ rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
+ }
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
/**
 * Returns the host physical address of page \a iPage within the object.
 *
 * @returns Physical address, or NIL_RTHCPHYS on failure.
 * @param   pMem    The memory object.
 * @param   iPage   Zero-based page index within the object.
 */
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
    IPRT_DARWIN_SAVE_EFL_AC();

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up the all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.

             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it our selves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             */
            static int s_offPmap = -1;
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                /* Scan the first 64 pointer-sized slots of kernel_map for kernel_pmap
                   to discover the pmap member offset; cached once found. */
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            /* Apply the discovered offset to the target task's map. */
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        IPRT_DARWIN_RESTORE_EFL_AC();
        AssertReturn(PgNo, NIL_RTHCPHYS);
        /* pmap_find_phys returns a page number; convert to a byte address and
           verify no bits were lost in the shift. */
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64().
         */
#ifdef __LP64__
        addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
#else
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
#endif
        IPRT_DARWIN_RESTORE_EFL_AC();
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        /* Guard against truncation when RTHCPHYS is narrower than addr64_t. */
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/memuserkernel-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/memuserkernel-r0drv-darwin.cpp
new file mode 100644
index 00000000..3375386a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/memuserkernel-r0drv-darwin.cpp
@@ -0,0 +1,118 @@
+/* $Id: memuserkernel-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+#include <iprt/assert.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/errcore.h>
+
+
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+ RT_ASSERT_INTS_ON();
+ IPRT_DARWIN_SAVE_EFL_AC();
+ int rc = copyin((const user_addr_t)R3PtrSrc, pvDst, cb);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+ RT_ASSERT_INTS_ON();
+ IPRT_DARWIN_SAVE_EFL_AC();
+ int rc = copyout(pvSrc, R3PtrDst, cb);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
{
    /* the commpage is above this. */
#ifdef RT_ARCH_X86
    /* 32-bit: user space ends at VM_MAX_ADDRESS. */
    return R3Ptr < VM_MAX_ADDRESS;
#else
    /* 64-bit: user space ends at VM_MAX_PAGE_ADDRESS. */
    return R3Ptr < VM_MAX_PAGE_ADDRESS;
#endif
}
+
+
RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
{
    /* Found no public #define or symbol for checking this, so we'll
       have to make do with thing found in the debugger and the sources. */
#ifdef RT_ARCH_X86
    NOREF(pv);
    return true; /* Almost anything is a valid kernel address here. */

#elif defined(RT_ARCH_AMD64)
    /* The canonical upper half of the 64-bit address space, where the
       Darwin kernel lives. */
    return (uintptr_t)pv >= UINT64_C(0xffff800000000000);

#else
# error "PORTME"
#endif
}
+
+
RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
{
    /* As mentioned in RTR0MemKernelIsValidAddr, found no way of checking
       this at compiler or runtime. */
#ifdef RT_ARCH_X86
    /* 32-bit Darwin: kernel and user may share the address space. */
    return false;
#else
    /* 64-bit: kernel lives in its own (upper) half of the address space. */
    return true;
#endif
}
+
+
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+ RT_NOREF(pvDst, pvSrc, cb);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+ RT_NOREF(pvDst, pvSrc, cb);
+ return VERR_NOT_SUPPORTED;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/mp-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/mp-r0drv-darwin.cpp
new file mode 100644
index 00000000..c315751a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/mp-r0drv-darwin.cpp
@@ -0,0 +1,314 @@
+/* $Id: mp-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mp.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/cpuset.h>
+#include <iprt/err.h>
+#include "r0drv/mp-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static int32_t volatile g_cMaxCpus = -1;
+
+
+/**
+ * Queries the CPU count via the hw.ncpu sysctl and caches it in g_cMaxCpus.
+ *
+ * @returns The CPU count (falls back to 64 if the sysctl fails).
+ */
+static int rtMpDarwinInitMaxCpus(void)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    int32_t cCpus = -1;
+    size_t oldLen = sizeof(cCpus);
+    /* The final parameter is 'size_t newlen', not a pointer; pass 0 (we are
+       only reading, newp is NULL). */
+    int rc = sysctlbyname("hw.ncpu", &cCpus, &oldLen, NULL, 0);
+    if (rc)
+    {
+        printf("IPRT: sysctlbyname(hw.ncpu) failed with rc=%d!\n", rc);
+        cCpus = 64; /* whatever */
+    }
+
+    /* Publish atomically so concurrent rtMpDarwinMaxCpus() callers see a
+       consistent value. */
+    ASMAtomicWriteS32(&g_cMaxCpus, cCpus);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return cCpus;
+}
+
+
+/** Returns the cached CPU count, querying the sysctl on first use. */
+DECLINLINE(int) rtMpDarwinMaxCpus(void)
+{
+    int const cCached = g_cMaxCpus;
+    return RT_LIKELY(cCached > 0) ? cCached : rtMpDarwinInitMaxCpus();
+}
+
+
+/** On darwin the CPU id is simply the Mach cpu_number(). */
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+ return cpu_number();
+}
+
+
+/** CPU set index == CPU id on darwin (see RTMpCpuIdToSetIndex). */
+RTDECL(int) RTMpCurSetIndex(void)
+{
+ return cpu_number();
+}
+
+
+/** Returns the current set index and stores the (identical) CPU id in *pidCpu. */
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+ return *pidCpu = cpu_number();
+}
+
+
+/** Maps a CPU id to its set index (identity mapping); -1 if out of range. */
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+    if (idCpu < RTCPUSET_MAX_CPUS)
+        return (int)idCpu;
+    return -1;
+}
+
+
+/** Maps a set index back to a CPU id (identity mapping); NIL if out of range. */
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+    if ((unsigned)iCpu < RTCPUSET_MAX_CPUS)
+        return (RTCPUID)iCpu;
+    return NIL_RTCPUID;
+}
+
+
+/** Highest possible CPU id = CPU count - 1 (ids are 0-based and dense). */
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+ return rtMpDarwinMaxCpus() - 1;
+}
+
+
+/** A CPU id is possible when it fits the CPU set and the detected CPU count. */
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+    if (idCpu >= RTCPUSET_MAX_CPUS)
+        return false;
+    return idCpu < (RTCPUID)rtMpDarwinMaxCpus();
+}
+
+
+/** Fills @a pSet with every possible CPU id and returns it. */
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+    RTCpuSetEmpty(pSet);
+    RTCPUID const idLast = RTMpGetMaxCpuId();
+    for (RTCPUID idCpu = 0; idCpu <= idLast; idCpu++)
+        if (RTMpIsCpuPossible(idCpu))
+            RTCpuSetAdd(pSet, idCpu);
+    return pSet;
+}
+
+
+/** Number of possible CPUs, per the cached hw.ncpu value. */
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+ return rtMpDarwinMaxCpus();
+}
+
+
+/** Online set is approximated by the possible set (no hotplug tracking). */
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+ /** @todo darwin R0 MP */
+ return RTMpGetSet(pSet);
+}
+
+
+/** Online count is approximated by the possible count (no hotplug tracking). */
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+ /** @todo darwin R0 MP */
+ return RTMpGetCount();
+}
+
+
+/** Online check is approximated by the possible check (no hotplug tracking). */
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+ /** @todo darwin R0 MP */
+ return RTMpIsCpuPossible(idCpu);
+}
+
+
+/** Current CPU frequency query; unimplemented, 0 means "unknown". */
+RTDECL(uint32_t) RTMpGetCurFrequency(RTCPUID idCpu)
+{
+ /** @todo darwin R0 MP (rainy day) */
+ RT_NOREF(idCpu);
+ return 0;
+}
+
+
+/** Maximum CPU frequency query; unimplemented, 0 means "unknown". */
+RTDECL(uint32_t) RTMpGetMaxFrequency(RTCPUID idCpu)
+{
+ /** @todo darwin R0 MP (rainy day) */
+ RT_NOREF(idCpu);
+ return 0;
+}
+
+
+/** Always false; this API is only meaningfully implemented on Windows. */
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+ /** @todo (not used on non-Windows platforms yet). */
+ return false;
+}
+
+
+/**
+ * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
+ * for the RTMpOnAll API.
+ *
+ * Runs on every CPU with interrupts disabled (mp_rendezvous_no_intrs).
+ *
+ * @param pvArg Pointer to the RTMPARGS package.
+ */
+static void rtmpOnAllDarwinWrapper(void *pvArg)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+ IPRT_DARWIN_SAVE_EFL_AC();
+ pArgs->pfnWorker(cpu_number(), pArgs->pvUser1, pArgs->pvUser2);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+/**
+ * Executes @a pfnWorker on every CPU via a no-interrupts rendezvous.
+ * Blocks until all CPUs have run the worker.
+ */
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RT_ASSERT_INTS_ON();
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ RTMPARGS Args;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = NIL_RTCPUID;
+ Args.cHits = 0;
+ mp_rendezvous_no_intrs(rtmpOnAllDarwinWrapper, &Args);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
+ * for the RTMpOnOthers API.
+ *
+ * Runs on every CPU; skips the one whose id matches pArgs->idCpu
+ * (the caller's CPU).
+ *
+ * @param pvArg Pointer to the RTMPARGS package.
+ */
+static void rtmpOnOthersDarwinWrapper(void *pvArg)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+ RTCPUID idCpu = cpu_number();
+ if (pArgs->idCpu != idCpu)
+ {
+ IPRT_DARWIN_SAVE_EFL_AC();
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ }
+}
+
+
+/**
+ * Executes @a pfnWorker on every CPU except the calling one via a
+ * no-interrupts rendezvous (the wrapper filters out idCpu).
+ */
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RT_ASSERT_INTS_ON();
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ RTMPARGS Args;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = RTMpCpuId();
+ Args.cHits = 0;
+ mp_rendezvous_no_intrs(rtmpOnOthersDarwinWrapper, &Args);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
+ * for the RTMpOnSpecific API.
+ *
+ * Runs on every CPU; only the one matching pArgs->idCpu executes the
+ * worker, and it bumps cHits so the caller can tell it actually ran.
+ *
+ * @param pvArg Pointer to the RTMPARGS package.
+ */
+static void rtmpOnSpecificDarwinWrapper(void *pvArg)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+ RTCPUID idCpu = cpu_number();
+ if (pArgs->idCpu == idCpu)
+ {
+ IPRT_DARWIN_SAVE_EFL_AC();
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ }
+}
+
+
+/**
+ * Executes @a pfnWorker on the CPU @a idCpu only.
+ *
+ * Uses a full rendezvous and filters in the wrapper; cHits != 1 means
+ * the target CPU never ran the worker (bad/offline id).
+ */
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RT_ASSERT_INTS_ON();
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ RTMPARGS Args;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu;
+ Args.cHits = 0;
+ mp_rendezvous_no_intrs(rtmpOnSpecificDarwinWrapper, &Args);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return Args.cHits == 1
+ ? VINF_SUCCESS
+ : VERR_CPU_NOT_FOUND;
+}
+
+
+/**
+ * Sends an IPI to @a idCpu via the dynamically resolved cpu_interrupt
+ * symbol; VERR_NOT_SUPPORTED when that symbol was not found at init.
+ */
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+ RT_ASSERT_INTS_ON();
+
+ if (g_pfnR0DarwinCpuInterrupt == NULL)
+ return VERR_NOT_SUPPORTED;
+ IPRT_DARWIN_SAVE_EFL_AC(); /* paranoia */
+ /// @todo use mp_cpus_kick() when available (since 10.10)? It's probably slower (locks, mask iteration, checks), though...
+ g_pfnR0DarwinCpuInterrupt(idCpu);
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
+/** RTMpOnAll workers run concurrently on darwin (rendezvous), so: true. */
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+ return true;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/process-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/process-r0drv-darwin.cpp
new file mode 100644
index 00000000..45501dce
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/process-r0drv-darwin.cpp
@@ -0,0 +1,46 @@
+/* $Id: process-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Process, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/process.h>
+
+
+/** Current process id, via the BSD proc_selfpid() KPI. */
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+ return proc_selfpid();
+}
+
+
+/** Ring-0 process handle = the current Mach task. */
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+ return (RTR0PROCESS)current_task();
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/rtStrFormatKernelAddress-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/rtStrFormatKernelAddress-r0drv-darwin.cpp
new file mode 100644
index 00000000..6b7b8098
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/rtStrFormatKernelAddress-r0drv-darwin.cpp
@@ -0,0 +1,50 @@
+/* $Id: rtStrFormatKernelAddress-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - IPRT String Formatter, ring-0 addresses.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_STRING
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+#include "internal/string.h"
+
+
+/**
+ * Formats a ring-0 address as a plain hex number (no obfuscation needed
+ * in R0, see below).
+ *
+ * @returns Number of characters written to @a pszBuf.
+ */
+DECLHIDDEN(size_t) rtStrFormatKernelAddress(char *pszBuf, size_t cbBuf, RTR0INTPTR uPtr, signed int cchWidth,
+ signed int cchPrecision, unsigned int fFlags)
+{
+ /*
+ * Kernel addresses don't need obfuscation in R0 because the kernel log is only accessible
+ * as root.
+ */
+ Assert(cbBuf >= 64); RT_NOREF(cbBuf);
+ return RTStrFormatNumber(pszBuf, uPtr, 16, cchWidth, cchPrecision, fFlags);
+}
diff --git a/src/VBox/Runtime/r0drv/darwin/semevent-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/semevent-r0drv-darwin.cpp
new file mode 100644
index 00000000..2a542327
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/semevent-r0drv-darwin.cpp
@@ -0,0 +1,427 @@
+/* $Id: semevent-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include <iprt/list.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Waiter entry. Lives on the waiting thread's stack and is linked into
+ * RTSEMEVENTINTERNAL::WaitList while the wait lasts.
+ */
+typedef struct RTSEMEVENTDARWINENTRY
+{
+ /** The list node (protected by the semaphore's spinlock). */
+ RTLISTNODE Node;
+ /** Flag set when waking up the thread by signal or destroy. */
+ bool volatile fWokenUp;
+} RTSEMEVENTDARWINENTRY;
+/** Pointer to waiter entry. */
+typedef RTSEMEVENTDARWINENTRY *PRTSEMEVENTDARWINENTRY;
+
+
+/**
+ * Darwin event semaphore (single-release / auto-reset).
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC); inverted on destruction. */
+ uint32_t volatile u32Magic;
+ /** Reference counter (creator + each active signal/wait call). */
+ uint32_t volatile cRefs;
+ /** Set if there are blocked threads. */
+ bool volatile fHaveBlockedThreads;
+ /** Set if the event object is signaled. */
+ bool volatile fSignaled;
+ /** List of waiting and woken up threads (RTSEMEVENTDARWINENTRY). */
+ RTLISTANCHOR WaitList;
+ /** The spinlock protecting us. */
+ lck_spin_t *pSpinlock;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+
+/** Creates an event semaphore with default flags (see RTSemEventCreateEx). */
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+ return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates an event semaphore.
+ *
+ * The lock-validator parameters (hClass, pszNameFmt) are ignored in this
+ * ring-0 darwin implementation.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INVALID_PARAMETER/POINTER.
+ */
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ RT_NOREF(hClass, pszNameFmt);
+ AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+ AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+ RT_ASSERT_PREEMPTIBLE();
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ /* Initialize to the non-signaled state with a single (caller) reference. */
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->cRefs = 1;
+ pThis->fHaveBlockedThreads = false;
+ pThis->fSignaled = false;
+ RTListInit(&pThis->WaitList);
+ Assert(g_pDarwinLockGroup);
+ pThis->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
+ if (pThis->pSpinlock)
+ {
+ *phEventSem = pThis;
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+
+ /* Spinlock allocation failed: invalidate and free. */
+ pThis->u32Magic = 0;
+ RTMemFree(pThis);
+ }
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventDarwinRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ /* Sanity: the count must be non-zero and not absurdly large. */
+ Assert(cRefs && cRefs < 100000); RT_NOREF_PV(cRefs);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * Only the final release (after RTSemEventDestroy flipped the magic) frees
+ * the structure; RTMemFree must not be called with interrupts disabled.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventDarwinRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ {
+ Assert(pThis->u32Magic != RTSEMEVENT_MAGIC);
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ lck_spin_destroy(pThis->pSpinlock, g_pDarwinLockGroup);
+ RTMemFree(pThis);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ }
+}
+
+/**
+ * Destroys the event semaphore: invalidates the magic, aborts all waiters
+ * with THREAD_RESTART and drops the creator reference (the memory is freed
+ * by the last rtR0SemEventDarwinRelease).
+ */
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+ PRTSEMEVENTINTERNAL pThis = hEventSem;
+ if (pThis == NIL_RTSEMEVENT)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ /* Interrupts must be disabled while holding the spinlock. */
+ RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+ lck_spin_lock(pThis->pSpinlock);
+
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC); /* make the handle invalid */
+ ASMAtomicWriteBool(&pThis->fSignaled, false);
+
+ /* abort waiting threads. */
+ PRTSEMEVENTDARWINENTRY pWaiter;
+ RTListForEach(&pThis->WaitList, pWaiter, RTSEMEVENTDARWINENTRY, Node)
+ {
+ pWaiter->fWokenUp = true;
+ thread_wakeup_prim((event_t)pWaiter, FALSE /* all threads */, THREAD_RESTART);
+ }
+
+ lck_spin_unlock(pThis->pSpinlock);
+ ASMSetFlags(fIntSaved);
+ rtR0SemEventDarwinRelease(pThis);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Signals the event semaphore, waking at most one waiter (auto-reset:
+ * the signaled state is cleared again when a waiter is woken).
+ *
+ * May be called from interrupt context (see note below).
+ */
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC,
+ ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
+ VERR_INVALID_HANDLE);
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+
+ /*
+ * Coming here with interrupts disabled should be okay. The thread_wakeup_prim KPI is used
+ * by the interrupt handler IOFilterInterruptEventSource::disableInterruptOccurred() via
+ * signalWorkAvailable(). The only problem is if we have to destroy the event structure,
+ * as RTMemFree does not work with interrupts disabled (IOFree/kfree takes zone mutex).
+ */
+ //RT_ASSERT_INTS_ON(); - we may be called from interrupt context, which seems to be perfectly fine.
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+ rtR0SemEventDarwinRetain(pThis);
+ lck_spin_lock(pThis->pSpinlock);
+
+ /*
+ * Wake up one thread.
+ */
+ ASMAtomicWriteBool(&pThis->fSignaled, true);
+
+ /* Pick the first waiter not already woken; clear the signaled state again
+ since that waiter consumes the signal. */
+ PRTSEMEVENTDARWINENTRY pWaiter;
+ RTListForEach(&pThis->WaitList, pWaiter, RTSEMEVENTDARWINENTRY, Node)
+ {
+ if (!pWaiter->fWokenUp)
+ {
+ pWaiter->fWokenUp = true;
+ thread_wakeup_prim((event_t)pWaiter, FALSE /* all threads */, THREAD_AWAKENED);
+ ASMAtomicWriteBool(&pThis->fSignaled, false);
+ break;
+ }
+ }
+
+ lck_spin_unlock(pThis->pSpinlock);
+ ASMSetFlags(fIntSaved);
+ rtR0SemEventDarwinRelease(pThis);
+
+ RT_ASSERT_PREEMPT_CPUID();
+ AssertMsg((fSavedEfl & X86_EFL_IF) == (ASMGetFlags() & X86_EFL_IF), ("fSavedEfl=%#x cur=%#x\n",(uint32_t)fSavedEfl, ASMGetFlags()));
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * Converts the RTSEMWAIT_FLAGS_* timeout specification to an absolute
+ * nanosecond deadline, then sleeps on a stack-allocated waiter entry via
+ * lck_spin_sleep[_deadline] until signaled, timed out, interrupted or the
+ * semaphore is destroyed (THREAD_RESTART).
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventDarwinWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ RT_NOREF(pSrcPos);
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ IPRT_DARWIN_SAVE_EFL_AC();
+
+ /* Interrupts must be disabled while holding the spinlock. */
+ RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+ rtR0SemEventDarwinRetain(pThis);
+ lck_spin_lock(pThis->pSpinlock);
+
+ /*
+ * In the signaled state? (Auto-reset: consume the signal and return.)
+ */
+ int rc;
+ if (ASMAtomicCmpXchgBool(&pThis->fSignaled, false, true))
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait. So, we'll need to convert the timeout and figure
+ * out if it's indefinite or not.
+ */
+ uint64_t uNsAbsTimeout = 1;
+ if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+ {
+ /* Milliseconds -> nanoseconds, saturating to "indefinite" on overflow. */
+ if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+ uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
+ ? uTimeout * UINT32_C(1000000)
+ : UINT64_MAX;
+ if (uTimeout == UINT64_MAX)
+ fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+ else
+ {
+ uint64_t u64Now;
+ if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+ {
+ if (uTimeout != 0)
+ {
+ /* Relative -> absolute; overflow again means indefinite. */
+ u64Now = RTTimeSystemNanoTS();
+ uNsAbsTimeout = u64Now + uTimeout;
+ if (uNsAbsTimeout < u64Now) /* overflow */
+ fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+ }
+ }
+ else
+ {
+ /* Absolute: also derive the remaining relative time for the
+ zero-timeout poll check below. */
+ uNsAbsTimeout = uTimeout;
+ u64Now = RTTimeSystemNanoTS();
+ uTimeout = u64Now < uTimeout ? uTimeout - u64Now : 0;
+ }
+ }
+ }
+
+ if ( !(fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+ && uTimeout == 0)
+ {
+ /*
+ * Poll call, we already checked the condition above so no need to
+ * wait for anything.
+ */
+ rc = VERR_TIMEOUT;
+ }
+ else
+ {
+ /* Queue a stack-based waiter entry and sleep on it. */
+ RTSEMEVENTDARWINENTRY Waiter;
+ Waiter.fWokenUp = false;
+ RTListAppend(&pThis->WaitList, &Waiter.Node);
+
+ for (;;)
+ {
+ /*
+ * Do the actual waiting.
+ */
+ ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, true);
+ wait_interrupt_t fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE ? THREAD_ABORTSAFE : THREAD_UNINT;
+ wait_result_t rcWait;
+ if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+ rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)&Waiter, fInterruptible);
+ else
+ {
+ uint64_t u64AbsTime;
+ nanoseconds_to_absolutetime(uNsAbsTimeout, &u64AbsTime);
+ rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
+ (event_t)&Waiter, fInterruptible, u64AbsTime);
+ }
+
+ /*
+ * Deal with the wait result. (The spinlock is held again here.)
+ */
+ if (RT_LIKELY(pThis->u32Magic == RTSEMEVENT_MAGIC))
+ {
+ switch (rcWait)
+ {
+ case THREAD_AWAKENED:
+ if (RT_LIKELY(Waiter.fWokenUp))
+ rc = VINF_SUCCESS;
+ else if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
+ rc = VERR_INTERRUPTED;
+ else
+ continue; /* Seen this happen after fork/exec/something. */
+ break;
+
+ case THREAD_TIMED_OUT:
+ Assert(!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE));
+ /* A racing signal still wins over the timeout. */
+ rc = !Waiter.fWokenUp ? VERR_TIMEOUT : VINF_SUCCESS;
+ break;
+
+ case THREAD_INTERRUPTED:
+ Assert(fInterruptible != THREAD_UNINT);
+ rc = !Waiter.fWokenUp ? VERR_INTERRUPTED : VINF_SUCCESS;
+ break;
+
+ case THREAD_RESTART:
+ /* Destroy woke us; the magic has been inverted. */
+ AssertMsg(pThis->u32Magic == ~RTSEMEVENT_MAGIC, ("%#x\n", pThis->u32Magic));
+ rc = VERR_SEM_DESTROYED;
+ break;
+
+ default:
+ AssertMsgFailed(("rcWait=%d\n", rcWait));
+ rc = VERR_INTERNAL_ERROR_3;
+ break;
+ }
+ }
+ else
+ rc = VERR_SEM_DESTROYED;
+ break;
+ }
+
+ RTListNodeRemove(&Waiter.Node);
+ }
+ }
+
+ lck_spin_unlock(pThis->pSpinlock);
+ ASMSetFlags(fIntSaved);
+ rtR0SemEventDarwinRelease(pThis);
+
+ IPRT_DARWIN_RESTORE_EFL_AC();
+ return rc;
+}
+
+
+/** Waits on the semaphore; records the caller position in strict builds. */
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventDarwinWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventDarwinWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+
+
+/** Debug variant of RTSemEventWaitEx carrying an explicit source position. */
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventDarwinWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+/**
+ * Returns the wait-timeout resolution in nanoseconds: one mach
+ * absolute-time tick converted via absolutetime_to_nanoseconds().
+ *
+ * Note: the previous 'x ? x : 0' ternary was a no-op and has been dropped;
+ * the (truncated) conversion result is returned directly.
+ */
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+    uint64_t cNs;
+    absolutetime_to_nanoseconds(1, &cNs);
+    return (uint32_t)cNs;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/semeventmulti-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/semeventmulti-r0drv-darwin.cpp
new file mode 100644
index 00000000..7c5d610f
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/semeventmulti-r0drv-darwin.cpp
@@ -0,0 +1,449 @@
+/* $Id: semeventmulti-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name fStateAndGen values
+ * The 32-bit fStateAndGen field packs the signalled flag (bit 0) together
+ * with a generation counter (bits 1..31).  The generation is bumped on every
+ * signal so a waiter can detect a signal that occurred while it slept.
+ * @{ */
+/** The state bit number. */
+#define RTSEMEVENTMULTIDARWIN_STATE_BIT        0
+/** The state mask. */
+#define RTSEMEVENTMULTIDARWIN_STATE_MASK       RT_BIT_32(RTSEMEVENTMULTIDARWIN_STATE_BIT)
+/** The generation mask. */
+#define RTSEMEVENTMULTIDARWIN_GEN_MASK         ~RTSEMEVENTMULTIDARWIN_STATE_MASK
+/** The generation shift. */
+#define RTSEMEVENTMULTIDARWIN_GEN_SHIFT        1
+/** The initial variable value (state clear, generation near wrap-around for testing). */
+#define RTSEMEVENTMULTIDARWIN_STATE_GEN_INIT   UINT32_C(0xfffffffc)
+/** @} */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Darwin multiple release event semaphore.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+    /** Magic value (RTSEMEVENTMULTI_MAGIC); inverted on destruction so
+     * sleeping waiters can detect that the handle died under them. */
+    uint32_t volatile   u32Magic;
+    /** The object state bit and generation counter.
+     * The generation counter is incremented every time the object is
+     * signalled. */
+    uint32_t volatile   fStateAndGen;
+    /** Reference counter (creator + each thread currently inside an API). */
+    uint32_t volatile   cRefs;
+    /** Set if there are blocked threads; checked by the signaller before
+     * issuing a (comparatively expensive) wakeup. */
+    bool volatile       fHaveBlockedThreads;
+    /** The spinlock protecting us. */
+    lck_spin_t         *pSpinlock;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+    /* Convenience wrapper: default flags, no lock validator class, no name. */
+    return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+                                    const char *pszNameFmt, ...)
+{
+    /*
+     * Validate input (lock validator bits are accepted but unused here).
+     */
+    RT_NOREF(hClass, pszNameFmt);
+    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
+    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Allocate and initialize the semaphore structure, then its spinlock.
+     */
+    int rc = VERR_NO_MEMORY;
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (pThis)
+    {
+        pThis->u32Magic            = RTSEMEVENTMULTI_MAGIC;
+        pThis->fStateAndGen        = RTSEMEVENTMULTIDARWIN_STATE_GEN_INIT;
+        pThis->cRefs               = 1;
+        pThis->fHaveBlockedThreads = false;
+        Assert(g_pDarwinLockGroup);
+        pThis->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
+        if (pThis->pSpinlock)
+        {
+            *phEventMultiSem = pThis;
+            rc = VINF_SUCCESS;
+        }
+        else
+        {
+            /* Spinlock allocation failed: invalidate the magic and bail out. */
+            pThis->u32Magic = 0;
+            RTMemFree(pThis);
+        }
+    }
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Grab an additional reference to the semaphore.
+ *
+ * @param   pThis   The semaphore instance.
+ */
+DECLINLINE(void) rtR0SemEventMultiDarwinRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
+    Assert(cNewRefs && cNewRefs < 100000); /* sanity: catch underflow and runaway leaks */
+    RT_NOREF_PV(cNewRefs);
+}
+
+
+/**
+ * Drop a reference; destroys the semaphore when the last one goes away.
+ *
+ * @param   pThis   The semaphore instance.
+ */
+DECLINLINE(void) rtR0SemEventMultiDarwinRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    uint32_t const cRemaining = ASMAtomicDecU32(&pThis->cRefs);
+    if (RT_LIKELY(cRemaining != 0))
+        return;
+
+    /* Last reference: the handle must already have been invalidated by
+       RTSemEventMultiDestroy before we free the backing memory. */
+    IPRT_DARWIN_SAVE_EFL_AC();
+    Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC);
+
+    lck_spin_destroy(pThis->pSpinlock, g_pDarwinLockGroup);
+    RTMemFree(pThis);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+/**
+ * Invalidates the semaphore, aborts any blocked waiters and drops the
+ * creator's reference.  Waiters still inside the API hold their own
+ * references, so the memory is only freed once the last of them leaves.
+ */
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (pThis == NIL_RTSEMEVENTMULTI)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    Assert(pThis->cRefs > 0);
+    RT_ASSERT_INTS_ON();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /* Interrupts are disabled while holding the spinlock (same discipline as
+       the other methods, which may run with interrupts off). */
+    RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+    lck_spin_lock(pThis->pSpinlock);
+
+    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC); /* make the handle invalid */
+    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIDARWIN_GEN_MASK); /* clear the signalled bit, keep the generation */
+    if (pThis->fHaveBlockedThreads)
+    {
+        /* abort waiting threads. */
+        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_RESTART);
+    }
+
+    lck_spin_unlock(pThis->pSpinlock);
+    ASMSetFlags(fIntSaved);
+    rtR0SemEventMultiDarwinRelease(pThis); /* drop the creator reference */
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Signals the event: sets the signalled state bit, bumps the generation
+ * counter and wakes up all blocked threads.
+ */
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPT_CPUID_VAR();
+
+    /*
+     * Coming here with interrupts disabled should be okay.  The thread_wakeup_prim KPI is used
+     * by the interrupt handler IOFilterInterruptEventSource::disableInterruptOccurred() via
+     * signalWorkAvailable().  The only problem is if we have to destroy the event structure,
+     * as RTMemFree does not work with interrupts disabled (IOFree/kfree takes zone mutex).
+     */
+    //RT_ASSERT_INTS_ON(); - we may be called from interrupt context, which seems to be perfectly fine if we disable interrupts.
+
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+    rtR0SemEventMultiDarwinRetain(pThis);
+    lck_spin_lock(pThis->pSpinlock);
+
+    /*
+     * Set the signal and increment the generation counter.
+     */
+    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+    fNew += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT;   /* new generation so sleeping waiters notice the signal */
+    fNew |= RTSEMEVENTMULTIDARWIN_STATE_MASK;       /* signalled until RTSemEventMultiReset */
+    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);
+
+    /*
+     * Wake up all sleeping threads.
+     */
+    if (pThis->fHaveBlockedThreads)
+    {
+        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
+        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
+    }
+
+    lck_spin_unlock(pThis->pSpinlock);
+    ASMSetFlags(fIntSaved);
+    rtR0SemEventMultiDarwinRelease(pThis);
+
+    RT_ASSERT_PREEMPT_CPUID();
+    /* fSavedEfl is declared by IPRT_DARWIN_SAVE_EFL_AC(); verify we restored IF faithfully. */
+    AssertMsg((fSavedEfl & X86_EFL_IF) == (ASMGetFlags() & X86_EFL_IF), ("fSavedEfl=%#x cur=%#x\n",(uint32_t)fSavedEfl, ASMGetFlags()));
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets the event: clears the signalled state bit.  The generation counter
+ * is left untouched so waiters that raced a prior signal still see it.
+ */
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPT_CPUID_VAR();
+    RT_ASSERT_INTS_ON();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+    rtR0SemEventMultiDarwinRetain(pThis);
+    lck_spin_lock(pThis->pSpinlock);
+
+    /* Clear only the state bit; bits 1..31 keep the generation count. */
+    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTIDARWIN_STATE_MASK);
+
+    lck_spin_unlock(pThis->pSpinlock);
+    ASMSetFlags(fIntSaved);
+    rtR0SemEventMultiDarwinRelease(pThis);
+
+    RT_ASSERT_PREEMPT_CPUID();
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
+ *          VERR_SEM_DESTROYED, or VERR_INVALID_HANDLE/PARAMETER).
+ * @param   pThis           The event semaphore.
+ * @param   fFlags          See RTSemEventMultiWaitEx.
+ * @param   uTimeout        See RTSemEventMultiWaitEx.
+ * @param   pSrcPos         The source code position of the wait.
+ */
+static int rtR0SemEventMultiDarwinWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+                                       PCRTLOCKVALSRCPOS pSrcPos)
+{
+    RT_NOREF(pSrcPos);
+
+    /*
+     * Validate input.
+     */
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+    if (uTimeout != 0 || (fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+        RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    RTCCUINTREG const fIntSaved = ASMIntDisableFlags();
+    rtR0SemEventMultiDarwinRetain(pThis);
+    lck_spin_lock(pThis->pSpinlock);
+
+    /*
+     * Is the event already signalled or do we have to wait?
+     */
+    int rc;
+    uint32_t const fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+    if (fOrgStateAndGen & RTSEMEVENTMULTIDARWIN_STATE_MASK)
+        rc = VINF_SUCCESS;
+    else
+    {
+        /*
+         * We have to wait.  So, we'll need to convert the timeout to an
+         * absolute nanosecond deadline and figure out if it's indefinite.
+         */
+        uint64_t uNsAbsTimeout = 1;
+        if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+        {
+            /* ms -> ns; the guard must compare against UINT64_MAX / 1000000,
+               otherwise the multiplication below can overflow 64 bits
+               (the old '/ 1000000 * 1000000' guard was ineffective). */
+            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+                uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000)
+                         ? uTimeout * UINT32_C(1000000)
+                         : UINT64_MAX;
+            if (uTimeout == UINT64_MAX)
+                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+            else
+            {
+                uint64_t u64Now;
+                if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+                {
+                    if (uTimeout != 0)
+                    {
+                        u64Now = RTTimeSystemNanoTS();
+                        uNsAbsTimeout = u64Now + uTimeout;
+                        if (uNsAbsTimeout < u64Now) /* overflow */
+                            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+                    }
+                }
+                else
+                {
+                    /* Absolute deadline given; derive the relative time for
+                       the zero/poll check below. */
+                    uNsAbsTimeout = uTimeout;
+                    u64Now        = RTTimeSystemNanoTS();
+                    uTimeout      = u64Now < uTimeout ? uTimeout - u64Now : 0;
+                }
+            }
+        }
+
+        if (   !(fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+            && uTimeout == 0)
+        {
+            /*
+             * Poll call, we already checked the condition above so no need to
+             * wait for anything.
+             */
+            rc = VERR_TIMEOUT;
+        }
+        else
+        {
+            for (;;)
+            {
+                /*
+                 * Do the actual waiting.  lck_spin_sleep* releases the
+                 * spinlock while asleep and re-acquires it before returning.
+                 */
+                ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, true);
+                wait_interrupt_t fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE ? THREAD_ABORTSAFE : THREAD_UNINT;
+                wait_result_t    rcWait;
+                if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+                    rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
+                else
+                {
+                    uint64_t u64AbsTime;
+                    nanoseconds_to_absolutetime(uNsAbsTimeout, &u64AbsTime);
+                    rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
+                                                     (event_t)pThis, fInterruptible, u64AbsTime);
+                }
+
+                /*
+                 * Deal with the wait result.
+                 */
+                if (RT_LIKELY(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC))
+                {
+                    switch (rcWait)
+                    {
+                        case THREAD_AWAKENED:
+                            /* A generation change proves a signal happened. */
+                            if (RT_LIKELY(ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen))
+                                rc = VINF_SUCCESS;
+                            else if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
+                                rc = VERR_INTERRUPTED;
+                            else
+                                continue; /* Seen this happen after fork/exec/something. */
+                            break;
+
+                        case THREAD_TIMED_OUT:
+                            Assert(!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE));
+                            rc = VERR_TIMEOUT;
+                            break;
+
+                        case THREAD_INTERRUPTED:
+                            Assert(fInterruptible != THREAD_UNINT);
+                            rc = VERR_INTERRUPTED;
+                            break;
+
+                        case THREAD_RESTART:
+                            /* Destruction aborts all waiters with THREAD_RESTART. */
+                            AssertMsg(pThis->u32Magic == ~RTSEMEVENTMULTI_MAGIC, ("%#x\n", pThis->u32Magic));
+                            rc = VERR_SEM_DESTROYED;
+                            break;
+
+                        default:
+                            AssertMsgFailed(("rcWait=%d\n", rcWait));
+                            rc = VERR_INTERNAL_ERROR_3;
+                            break;
+                    }
+                }
+                else
+                    rc = VERR_SEM_DESTROYED;
+                break;
+            }
+        }
+    }
+
+    lck_spin_unlock(pThis->pSpinlock);
+    ASMSetFlags(fIntSaved);
+    rtR0SemEventMultiDarwinRelease(pThis);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+}
+
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+    /* Forward to the common worker; strict builds pass the caller position
+       to the lock validator, release builds pass no position info. */
+#ifdef RTSEMEVENT_STRICT
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+    return rtR0SemEventMultiDarwinWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#else
+    return rtR0SemEventMultiDarwinWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#endif
+}
+
+
+/**
+ * Debug variant of RTSemEventMultiWaitEx.
+ *
+ * The uId/RT_SRC_POS_DECL arguments are consumed implicitly by the
+ * RTLOCKVALSRCPOS_INIT_DEBUG_API() macro to build the validator position.
+ */
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+                                       RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+    return rtR0SemEventMultiDarwinWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+    /*
+     * One mach absolute-time tick converted to nanoseconds is the finest
+     * granularity a timed wait can have on this platform.
+     *
+     * Note: the previous 'x ? x : 0' fallback was dead code (identical to
+     * plain 'x'); the guard was clearly meant to avoid reporting a
+     * nonsensical resolution of zero, so return a minimum of 1 ns.
+     */
+    uint64_t cNs;
+    absolutetime_to_nanoseconds(1, &cNs);
+    return (uint32_t)cNs ? (uint32_t)cNs : 1;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/semfastmutex-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/semfastmutex-r0drv-darwin.cpp
new file mode 100644
index 00000000..dadd788c
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/semfastmutex-r0drv-darwin.cpp
@@ -0,0 +1,140 @@
+/* $Id: semfastmutex-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the darwin semaphore structure.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+    /** Magic value (RTSEMFASTMUTEX_MAGIC); set to RTSEMFASTMUTEX_MAGIC_DEAD
+     * on destruction. */
+    uint32_t            u32Magic;
+    /** The mutex (XNU lck_mtx, allocated from g_pDarwinLockGroup). */
+    lck_mtx_t          *pMtx;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+    /*
+     * Validate input, then allocate the wrapper and the darwin mutex.
+     */
+    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    int rc = VERR_NO_MEMORY;
+    PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (pThis)
+    {
+        pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+        Assert(g_pDarwinLockGroup);
+        pThis->pMtx = lck_mtx_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
+        if (pThis->pMtx)
+        {
+            *phFastMtx = pThis;
+            rc = VINF_SUCCESS;
+        }
+        else
+            RTMemFree(pThis);
+    }
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Destroys the fast mutex.  NIL is quietly ignored; the magic is killed
+ * before the mutex is freed so stale handles are caught by the asserts in
+ * the other methods.
+ */
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    if (pThis == NIL_RTSEMFASTMUTEX)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+    Assert(g_pDarwinLockGroup);
+    lck_mtx_free(pThis->pMtx, g_pDarwinLockGroup);
+    pThis->pMtx = NULL;
+    RTMemFree(pThis);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+    /*
+     * Validate the handle, then take the mutex.  lck_mtx_lock blocks with no
+     * timeout, so once the checks pass this cannot fail.
+     */
+    PRTSEMFASTMUTEXINTERNAL pMtxInt = (PRTSEMFASTMUTEXINTERNAL)hFastMtx;
+    AssertPtrReturn(pMtxInt, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pMtxInt->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pMtxInt, pMtxInt->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    lck_mtx_lock(pMtxInt->pMtx);
+
+    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+    /*
+     * Validate the handle, then drop the mutex.
+     */
+    PRTSEMFASTMUTEXINTERNAL pMtxInt = (PRTSEMFASTMUTEXINTERNAL)hFastMtx;
+    AssertPtrReturn(pMtxInt, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pMtxInt->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pMtxInt, pMtxInt->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    lck_mtx_unlock(pMtxInt->pMtx);
+
+    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+    return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/semmutex-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/semmutex-r0drv-darwin.cpp
new file mode 100644
index 00000000..ac5f7d0a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/semmutex-r0drv-darwin.cpp
@@ -0,0 +1,407 @@
+/* $Id: semmutex-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Darwin mutex semaphore.
+ */
+typedef struct RTSEMMUTEXINTERNAL
+{
+    /** Magic value (RTSEMMUTEX_MAGIC); inverted on destruction. */
+    uint32_t volatile   u32Magic;
+    /** The number of waiting threads (protected by pSpinlock). */
+    uint32_t            cWaiters;
+    /** The number of references (creator + each sleeping waiter). */
+    uint32_t volatile   cRefs;
+    /** The number of recursions by the current owner (protected by pSpinlock). */
+    uint32_t            cRecursions;
+    /** The handle of the owner thread, NIL_RTNATIVETHREAD when free. */
+    RTNATIVETHREAD      hNativeOwner;
+    /** The spinlock protecting us. */
+    lck_spin_t         *pSpinlock;
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMutexSem)
+{
+    /* Convenience wrapper: default flags, no lock validator class/sub-class, no name. */
+    return RTSemMutexCreateEx(phMutexSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
+}
+
+
+RTDECL(int) RTSemMutexCreateEx(PRTSEMMUTEX phMutexSem, uint32_t fFlags,
+                               RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
+{
+    /*
+     * Validate input (lock validator parameters are accepted but unused here).
+     */
+    RT_NOREF(hClass, uSubClass, pszNameFmt);
+    AssertReturn(!(fFlags & ~RTSEMMUTEX_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Allocate and initialize the mutex structure, then its spinlock.
+     */
+    AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
+    int rc = VERR_NO_MEMORY;
+    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (pThis)
+    {
+        pThis->u32Magic     = RTSEMMUTEX_MAGIC;
+        pThis->cWaiters     = 0;
+        pThis->cRefs        = 1;
+        pThis->cRecursions  = 0;
+        pThis->hNativeOwner = NIL_RTNATIVETHREAD;
+        Assert(g_pDarwinLockGroup);
+        pThis->pSpinlock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
+        if (pThis->pSpinlock)
+        {
+            *phMutexSem = pThis;
+            rc = VINF_SUCCESS;
+        }
+        else
+            RTMemFree(pThis);
+    }
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Called when the refcount reaches zero.
+ *
+ * Note: the caller holds pThis->pSpinlock; this routine releases it before
+ * destroying it and freeing the structure.
+ */
+static void rtSemMutexDarwinFree(PRTSEMMUTEXINTERNAL pThis)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    lck_spin_unlock(pThis->pSpinlock);
+    lck_spin_destroy(pThis->pSpinlock, g_pDarwinLockGroup);
+    RTMemFree(pThis);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
+{
+    /*
+     * Validate input.
+     *
+     * A NIL handle is quietly ignored (VINF_SUCCESS), matching the behaviour
+     * of RTSemEventMultiDestroy and RTSemFastMutexDestroy; the old code
+     * inconsistently returned VERR_INVALID_PARAMETER here.
+     */
+    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
+    if (pThis == NIL_RTSEMMUTEX)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Kill it, wake up all waiting threads and release the reference.
+     * Waiters are aborted with THREAD_RESTART which they translate into
+     * VERR_SEM_DESTROYED; each of them holds its own reference, so the
+     * structure is only freed when the last one leaves.
+     */
+    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMMUTEX_MAGIC, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
+    lck_spin_lock(pThis->pSpinlock);
+
+    if (pThis->cWaiters > 0)
+        thread_wakeup_prim((event_t)pThis, FALSE /* one_thread */, THREAD_RESTART);
+
+    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
+        rtSemMutexDarwinFree(pThis);     /* releases the spinlock */
+    else
+        lck_spin_unlock(pThis->pSpinlock);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Internal worker for the sleep scenario.
+ *
+ * Called owning the spinlock, returns without it.
+ *
+ * @returns IPRT status code.
+ * @param   pThis           The mutex instance.
+ * @param   cMillies        The timeout.
+ * @param   fInterruptible  Whether it's interruptible
+ *                          (RTSemMutexRequestNoResume) or not
+ *                          (RTSemMutexRequest).
+ * @param   hNativeSelf     The thread handle of the caller.
+ */
+static int rtR0SemMutexDarwinRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
+                                          wait_interrupt_t fInterruptible, RTNATIVETHREAD hNativeSelf)
+{
+    /*
+     * Grab a reference and indicate that we're waiting.
+     * The reference keeps the structure alive if it is destroyed while we sleep.
+     */
+    pThis->cWaiters++;
+    ASMAtomicIncU32(&pThis->cRefs);
+
+    /*
+     * Go to sleep, use the address of the mutex instance as sleep/blocking/event id.
+     * lck_spin_sleep* drops the spinlock while asleep and retakes it on return.
+     */
+    wait_result_t rcWait;
+    if (cMillies == RT_INDEFINITE_WAIT)
+        rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
+    else
+    {
+        uint64_t u64AbsTime;
+        nanoseconds_to_absolutetime(cMillies * UINT64_C(1000000), &u64AbsTime);
+        u64AbsTime += mach_absolute_time();
+
+        rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
+                                         (event_t)pThis, fInterruptible, u64AbsTime);
+    }
+
+    /*
+     * Translate the rc.
+     */
+    int rc;
+    switch (rcWait)
+    {
+        case THREAD_AWAKENED:
+            if (RT_LIKELY(pThis->u32Magic == RTSEMMUTEX_MAGIC))
+            {
+                /* RTSemMutexRelease wakes exactly one waiter after clearing
+                   ownership, so the mutex must be free here. */
+                if (RT_LIKELY(   pThis->cRecursions == 0
+                              && pThis->hNativeOwner == NIL_RTNATIVETHREAD))
+                {
+                    pThis->cRecursions  = 1;
+                    pThis->hNativeOwner = hNativeSelf;
+                    rc = VINF_SUCCESS;
+                }
+                else
+                {
+                    Assert(pThis->cRecursions == 0);
+                    Assert(pThis->hNativeOwner == NIL_RTNATIVETHREAD);
+                    rc = VERR_INTERNAL_ERROR_3;
+                }
+            }
+            else
+                rc = VERR_SEM_DESTROYED;
+            break;
+
+        case THREAD_TIMED_OUT:
+            Assert(cMillies != RT_INDEFINITE_WAIT);
+            rc = VERR_TIMEOUT;
+            break;
+
+        case THREAD_INTERRUPTED:
+            Assert(fInterruptible);
+            rc = VERR_INTERRUPTED;
+            break;
+
+        case THREAD_RESTART:
+            /* RTSemMutexDestroy aborts waiters with THREAD_RESTART. */
+            Assert(pThis->u32Magic == ~RTSEMMUTEX_MAGIC);
+            rc = VERR_SEM_DESTROYED;
+            break;
+
+        default:
+            AssertMsgFailed(("rcWait=%d\n", rcWait));
+            rc = VERR_GENERAL_FAILURE;
+            break;
+    }
+
+    /*
+     * Dereference it and quit the lock.
+     * If we held the last reference the destroy has happened and we free here.
+     */
+    Assert(pThis->cWaiters > 0);
+    pThis->cWaiters--;
+
+    Assert(pThis->cRefs > 0);
+    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+        rtSemMutexDarwinFree(pThis);     /* releases the spinlock */
+    else
+        lck_spin_unlock(pThis->pSpinlock);
+    return rc;
+}
+
+
+/**
+ * Internal worker for RTSemMutexRequest and RTSemMutexRequestNoResume
+ *
+ * @returns IPRT status code.
+ * @param   hMutexSem       The mutex handle.
+ * @param   cMillies        The timeout.
+ * @param   fInterruptible  Whether it's interruptible
+ *                          (RTSemMutexRequestNoResume) or not
+ *                          (RTSemMutexRequest).
+ */
+DECLINLINE(int) rtR0SemMutexDarwinRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, wait_interrupt_t fInterruptible)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Grab the lock and check out the state.
+     */
+    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
+    int            rc          = VINF_SUCCESS;
+    lck_spin_lock(pThis->pSpinlock);
+
+    /* Recursive call? */
+    if (pThis->hNativeOwner == hNativeSelf)
+    {
+        Assert(pThis->cRecursions > 0);
+        Assert(pThis->cRecursions < 256);   /* paranoia: runaway recursion */
+        pThis->cRecursions++;
+    }
+
+    /* Is it free and nobody ahead of us in the queue?  (Checking cWaiters
+       keeps things FIFO-fair w.r.t. already sleeping threads.) */
+    else if (   pThis->hNativeOwner == NIL_RTNATIVETHREAD
+             && pThis->cWaiters == 0)
+    {
+        pThis->hNativeOwner = hNativeSelf;
+        pThis->cRecursions  = 1;
+    }
+
+    /* Polling call? */
+    else if (cMillies == 0)
+        rc = VERR_TIMEOUT;
+
+    /* Yawn, time for a nap... */
+    else
+    {
+        /* The sleep worker returns with the spinlock already released. */
+        rc = rtR0SemMutexDarwinRequestSleep(pThis, cMillies, fInterruptible, hNativeSelf);
+        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+        return rc;
+    }
+
+    lck_spin_unlock(pThis->pSpinlock);
+    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+    return rc;
+}
+
+
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+    /* Uninterruptible request (THREAD_UNINT): signals will not abort the wait. */
+    return rtR0SemMutexDarwinRequest(hMutexSem, cMillies, THREAD_UNINT);
+}
+
+
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    /* No lock validator on this platform: drop the position info and forward. */
+    RT_SRC_POS_NOREF(); RT_NOREF(uId);
+    return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+    /* Interruptible request (THREAD_ABORTSAFE): may return VERR_INTERRUPTED. */
+    return rtR0SemMutexDarwinRequest(hMutexSem, cMillies, THREAD_ABORTSAFE);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    /* No lock validator on this platform: drop the position info and forward. */
+    RT_SRC_POS_NOREF(); RT_NOREF(uId);
+    return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Take the lock and do the job.
+     */
+    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
+    int            rc          = VINF_SUCCESS;
+    lck_spin_lock(pThis->pSpinlock);
+
+    if (pThis->hNativeOwner == hNativeSelf)
+    {
+        Assert(pThis->cRecursions > 0);
+        if (--pThis->cRecursions == 0)
+        {
+            /* Fully released: clear ownership and wake one waiter, if any. */
+            pThis->hNativeOwner = NIL_RTNATIVETHREAD;
+            if (pThis->cWaiters > 0)
+                thread_wakeup_prim((event_t)pThis, TRUE /* one_thread */, THREAD_AWAKENED);
+        }
+    }
+    else
+        rc = VERR_NOT_OWNER;
+
+    lck_spin_unlock(pThis->pSpinlock);
+
+    AssertRC(rc);
+    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+    /* Bug fix: the previous code returned VINF_SUCCESS unconditionally,
+       hiding VERR_NOT_OWNER from the caller despite computing (and
+       asserting on) rc above. */
+    return rc;
+}
+
+
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+    /*
+     * Validate the handle before touching it.
+     */
+    RTSEMMUTEXINTERNAL *pThis = (RTSEMMUTEXINTERNAL *)hMutexSem;
+    AssertPtrReturn(pThis, false);
+    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, false);
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Peek at the owner field under the spinlock.  Any owning thread counts,
+     * not just the caller.
+     */
+    lck_spin_lock(pThis->pSpinlock);
+    bool const fOwned = pThis->hNativeOwner != NIL_RTNATIVETHREAD;
+    lck_spin_unlock(pThis->pSpinlock);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return fOwned;
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/spinlock-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/spinlock-r0drv-darwin.cpp
new file mode 100644
index 00000000..622c5503
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/spinlock-r0drv-darwin.cpp
@@ -0,0 +1,177 @@
+/* $Id: spinlock-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/spinlock.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Darwin spinlock wrapper (around lck_spin_t, not the NT KSPIN_LOCK the
+ * original comment claimed — this is the Darwin port).
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+    /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+    uint32_t volatile   u32Magic;
+    /** Saved interrupt flag (EFLAGS), only used for interrupt-safe locks. */
+    uint32_t volatile   fIntSaved;
+    /** Creation flags (RTSPINLOCK_FLAGS_INTERRUPT_SAFE/UNSAFE). */
+    uint32_t            fFlags;
+    /** The Darwin spinlock structure. */
+    lck_spin_t         *pSpinLock;
+    /** The spinlock name (not copied; caller-owned string). */
+    const char         *pszName;
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+
+/** Creates a spinlock; fFlags must be exactly one of the INTERRUPT_SAFE /
+ *  INTERRUPT_UNSAFE flags.  Returns VERR_NO_MEMORY on any allocation failure. */
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    /*
+     * Allocate.
+     */
+    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (pThis)
+    {
+        /*
+         * Initialize & return.
+         */
+        pThis->u32Magic  = RTSPINLOCK_MAGIC;
+        pThis->fIntSaved = 0;
+        pThis->fFlags    = fFlags;
+        pThis->pszName   = pszName;
+        /* The lock group is created in rtR0InitNative; must exist by now. */
+        Assert(g_pDarwinLockGroup);
+        pThis->pSpinLock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
+        if (pThis->pSpinLock)
+        {
+            *pSpinlock = pThis;
+            IPRT_DARWIN_RESTORE_EFL_AC();
+            return VINF_SUCCESS;
+        }
+
+        RTMemFree(pThis);
+    }
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VERR_NO_MEMORY;
+}
+
+
+/** Destroys a spinlock; NULL is rejected with VERR_INVALID_PARAMETER. */
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+    /*
+     * Validate input.
+     */
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
+                    ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
+                    VERR_INVALID_PARAMETER);
+
+    /*
+     * Make the lock invalid and release the memory.
+     */
+    /* Bumping the magic invalidates the handle for concurrent/late users. */
+    ASMAtomicIncU32(&pThis->u32Magic);
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    Assert(g_pDarwinLockGroup);
+    lck_spin_free(pThis->pSpinLock, g_pDarwinLockGroup);
+    pThis->pSpinLock = NULL;
+
+    RTMemFree(pThis);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/** Acquires the spinlock; for interrupt-safe locks interrupts are disabled
+ *  first and the previous EFLAGS stashed in the lock for RTSpinlockRelease. */
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    AssertPtr(pThis);
+    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+    {
+        /* Disable interrupts BEFORE taking the lock, then record the saved
+           flags inside the lock (safe only once we own it). */
+        uint32_t fIntSaved = ASMGetFlags();
+        ASMIntDisable();
+        lck_spin_lock(pThis->pSpinLock);
+        pThis->fIntSaved = fIntSaved;
+        IPRT_DARWIN_RESTORE_EFL_ONLY_AC_EX(fIntSaved);
+    }
+    else
+    {
+        IPRT_DARWIN_SAVE_EFL_AC();
+        lck_spin_lock(pThis->pSpinLock);
+        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+    }
+}
+
+
+/** Releases the spinlock; for interrupt-safe locks the EFLAGS saved by
+ *  RTSpinlockAcquire are restored after dropping the lock. */
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    AssertPtr(pThis);
+    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+    {
+        /* Read the saved flags while still holding the lock, then unlock
+           before restoring them (re-enabling interrupts). */
+        uint32_t fIntSaved = pThis->fIntSaved;
+        pThis->fIntSaved = 0;
+        lck_spin_unlock(pThis->pSpinLock);
+        ASMSetFlags(fIntSaved);
+    }
+    else
+    {
+        IPRT_DARWIN_SAVE_EFL_AC();
+        lck_spin_unlock(pThis->pSpinLock);
+        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/the-darwin-kernel.h b/src/VBox/Runtime/r0drv/darwin/the-darwin-kernel.h
new file mode 100644
index 00000000..45f41908
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/the-darwin-kernel.h
@@ -0,0 +1,240 @@
+/* $Id: the-darwin-kernel.h $ */
+/** @file
+ * IPRT - Include all necessary headers for the Darwin kernel.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_darwin_the_darwin_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_darwin_the_darwin_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/* Problematic header(s) containing conflicts with IPRT first. (FreeBSD has fixed these ages ago.) */
+#define __STDC_CONSTANT_MACROS
+#define __STDC_LIMIT_MACROS
+#include <sys/param.h>
+#include <mach/vm_param.h>
+#undef ALIGN
+#undef MIN
+#undef MAX
+#undef PAGE_SIZE
+#undef PAGE_SHIFT
+#undef PVM
+
+
+/* Include the IPRT definitions of the conflicting #defines & typedefs. */
+#include <iprt/cdefs.h>
+#include <iprt/types.h>
+#include <iprt/param.h>
+
+
+/* After including cdefs, we can check that this really is Darwin. */
+#ifndef RT_OS_DARWIN
+# error "RT_OS_DARWIN must be defined!"
+#endif
+
+#if defined(__clang__) || RT_GNUC_PREREQ(4, 4)
+# pragma GCC diagnostic push
+#endif
+#if defined(__clang__) || RT_GNUC_PREREQ(4, 2)
+# pragma GCC diagnostic ignored "-Wc++11-extensions"
+# pragma GCC diagnostic ignored "-Wc99-extensions"
+# pragma GCC diagnostic ignored "-Wextra-semi"
+# pragma GCC diagnostic ignored "-Wzero-length-array"
+# pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#endif
+
+/* now we're ready for including the rest of the Darwin headers. */
+#include <kern/thread.h>
+#include <kern/clock.h>
+#include <kern/sched_prim.h>
+#include <kern/locks.h>
+#if defined(RT_ARCH_X86) && MAC_OS_X_VERSION_MIN_REQUIRED < 1060
+# include <i386/mp_events.h>
+#endif
+#include <libkern/libkern.h>
+#include <libkern/sysctl.h>
+#include <libkern/version.h>
+#include <mach/thread_act.h>
+#include <mach/vm_map.h>
+#include <mach/vm_region.h>
+#include <pexpert/pexpert.h>
+#include <sys/conf.h>
+#include <sys/errno.h>
+#include <sys/ioccom.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/vnode.h>
+#include <sys/fcntl.h>
+#include <IOKit/IOTypes.h>
+#include <IOKit/IOLib.h> /* Note! Has Assert down as a function. */
+#include <IOKit/IOMemoryDescriptor.h>
+#include <IOKit/IOBufferMemoryDescriptor.h>
+#include <IOKit/IOMapper.h>
+
+#if defined(__clang__) || RT_GNUC_PREREQ(4, 4)
+# pragma GCC diagnostic pop
+#endif
+
+
+/* See osfmk/kern/ast.h. */
+#ifndef AST_PREEMPT
+# define AST_PREEMPT UINT32_C(1)
+# define AST_QUANTUM UINT32_C(2)
+# define AST_URGENT UINT32_C(4)
+#endif
+
+/* This flag was added in 10.6, it seems. Should be harmless in earlier
+ releases... */
+#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
+# define kIOMemoryMapperNone UINT32_C(0x800)
+#endif
+
+/** @name Macros for preserving EFLAGS.AC (despair / paranoid)
+ * @remarks Unlike linux, we have to restore it unconditionally on darwin.
+ * @{ */
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/x86.h>
+#define IPRT_DARWIN_SAVE_EFL_AC() RTCCUINTREG const fSavedEfl = ASMGetFlags();
+#define IPRT_DARWIN_RESTORE_EFL_AC() ASMSetFlags(fSavedEfl)
+#define IPRT_DARWIN_RESTORE_EFL_ONLY_AC() ASMChangeFlags(~X86_EFL_AC, fSavedEfl & X86_EFL_AC)
+#define IPRT_DARWIN_RESTORE_EFL_ONLY_AC_EX(a_fSavedEfl) ASMChangeFlags(~X86_EFL_AC, (a_fSavedEfl) & X86_EFL_AC)
+/** @} */
+
+
+RT_C_DECLS_BEGIN
+
+/* mach/vm_types.h */
+typedef struct pmap *pmap_t;
+
+/* vm/vm_kern.h */
+extern vm_map_t kernel_map;
+
+/* vm/pmap.h */
+extern pmap_t kernel_pmap;
+
+/* kern/task.h */
+extern vm_map_t get_task_map(task_t);
+
+/* osfmk/i386/pmap.h */
+extern ppnum_t pmap_find_phys(pmap_t, addr64_t);
+
+/* vm/vm_map.h */
+extern kern_return_t vm_map_wire(vm_map_t, vm_map_offset_t, vm_map_offset_t, vm_prot_t, boolean_t);
+extern kern_return_t vm_map_unwire(vm_map_t, vm_map_offset_t, vm_map_offset_t, boolean_t);
+
+/* mach/i386/thread_act.h */
+extern kern_return_t thread_terminate(thread_t);
+
+/* osfmk/i386/mp.h */
+extern void mp_rendezvous(void (*)(void *), void (*)(void *), void (*)(void *), void *);
+extern void mp_rendezvous_no_intrs(void (*)(void *), void *);
+
+/* osfmk/i386/cpu_data.h */
+struct my_cpu_data_x86
+{
+ struct my_cpu_data_x86 *cpu_this;
+ thread_t cpu_active_thread;
+ void *cpu_int_state;
+ vm_offset_t cpu_active_stack;
+ vm_offset_t cpu_kernel_stack;
+ vm_offset_t cpu_int_stack_top;
+ int cpu_preemption_level;
+ int cpu_simple_lock_count;
+ int cpu_interrupt_level;
+ int cpu_number;
+ int cpu_phys_number;
+ cpu_id_t cpu_id;
+ int cpu_signals;
+ int cpu_mcount_off;
+ /*ast_t*/uint32_t cpu_pending_ast;
+ int cpu_type;
+ int cpu_subtype;
+ int cpu_threadtype;
+ int cpu_running;
+};
+
+/* osfmk/i386/cpu_number.h */
+extern int cpu_number(void);
+
+/* osfmk/vm/vm_user.c */
+extern kern_return_t vm_protect(vm_map_t, vm_offset_t, vm_size_t, boolean_t, vm_prot_t);
+/*extern kern_return_t vm_region(vm_map_t, vm_address_t *, vm_size_t *, vm_region_flavor_t, vm_region_info_t,
+ mach_msg_type_number_t *, mach_port_t *);*/
+
+/* i386/machine_routines.h */
+extern int ml_get_max_cpus(void);
+
+RT_C_DECLS_END
+
+
+/*
+ * Internals of the Darwin Ring-0 IPRT.
+ */
+RT_C_DECLS_BEGIN
+
+/* initterm-r0drv-darwin.cpp. */
+typedef uint32_t * (*PFNR0DARWINASTPENDING)(void);
+typedef void (*PFNR0DARWINCPUINTERRUPT)(int);
+extern lck_grp_t *g_pDarwinLockGroup;
+extern PFNR0DARWINASTPENDING g_pfnR0DarwinAstPending;
+extern PFNR0DARWINCPUINTERRUPT g_pfnR0DarwinCpuInterrupt;
+
+/* threadpreempt-r0drv-darwin.cpp */
+int rtThreadPreemptDarwinInit(void);
+void rtThreadPreemptDarwinTerm(void);
+
+RT_C_DECLS_END
+
+
+/**
+ * Converts from nanoseconds to Darwin absolute time units.
+ *
+ * Thin wrapper over the XNU nanoseconds_to_absolutetime() conversion.
+ *
+ * @returns Darwin absolute time.
+ * @param   u64Nano     Time interval in nanoseconds.
+ */
+DECLINLINE(uint64_t) rtDarwinAbsTimeFromNano(const uint64_t u64Nano)
+{
+    uint64_t u64AbsTime;
+    nanoseconds_to_absolutetime(u64Nano, &u64AbsTime);
+    return u64AbsTime;
+}
+
+
+#include <iprt/err.h>
+
+/**
+ * Convert from mach kernel return code to IPRT status code.
+ *
+ * Note: deliberately coarse — everything but KERN_SUCCESS collapses to
+ * VERR_GENERAL_FAILURE, so callers cannot distinguish Mach failure modes.
+ *
+ * @todo put this where it belongs! (i.e. in a separate file and prototype in iprt/err.h)
+ */
+DECLINLINE(int) RTErrConvertFromMachKernReturn(kern_return_t rc)
+{
+    switch (rc)
+    {
+        case KERN_SUCCESS:  return VINF_SUCCESS;
+        default:            return VERR_GENERAL_FAILURE;
+    }
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_darwin_the_darwin_kernel_h */
+
diff --git a/src/VBox/Runtime/r0drv/darwin/thread-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/thread-r0drv-darwin.cpp
new file mode 100644
index 00000000..db5c4525
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/thread-r0drv-darwin.cpp
@@ -0,0 +1,82 @@
+/* $Id: thread-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Threads, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+
+
+
+/** Returns the calling thread's native handle (XNU current_thread()). */
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+    return (RTNATIVETHREAD)current_thread();
+}
+
+
+/** Common worker for RTThreadSleep/RTThreadSleepNoLog: blocks the calling
+ *  thread for cMillies milliseconds using an absolute clock deadline.
+ *  Always returns VINF_SUCCESS (no interruption reporting). */
+static int rtR0ThreadDarwinSleepCommon(RTMSINTERVAL cMillies)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    uint64_t u64Deadline;
+    clock_interval_to_deadline(cMillies, kMillisecondScale, &u64Deadline);
+    clock_delay_until(u64Deadline);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/** Puts the calling thread to sleep for cMillies milliseconds. */
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+    return rtR0ThreadDarwinSleepCommon(cMillies);
+}
+
+
+/** Same as RTThreadSleep; there is no logging to suppress on this platform. */
+RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
+{
+    return rtR0ThreadDarwinSleepCommon(cMillies);
+}
+
+
+/** Yields the CPU via thread_block(); always claims a yield happened since
+ *  XNU gives us no way to tell whether another thread actually ran. */
+RTDECL(bool) RTThreadYield(void)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    thread_block(THREAD_CONTINUE_NULL);
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return true; /* this is fishy */
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/thread2-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/thread2-r0drv-darwin.cpp
new file mode 100644
index 00000000..629d3f98
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/thread2-r0drv-darwin.cpp
@@ -0,0 +1,192 @@
+/* $Id: thread2-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include "internal/thread.h"
+
+
+/** Native thread subsystem init; nothing to do since ring-0 has no TLS. */
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+    /* No TLS in Ring-0. :-/ */
+    return VINF_SUCCESS;
+}
+
+
+/** Looks up the IPRT thread handle for the calling native thread; NIL if the
+ *  thread was never registered with IPRT. */
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+    return rtThreadGetByNative((RTNATIVETHREAD)current_thread());
+}
+
+
+/**
+ * Applies Mach scheduling policies matching the IPRT thread type.
+ *
+ * Always sets THREAD_PRECEDENCE_POLICY; timer threads additionally get
+ * extended (non-timeshare) and time-constraint policies.  Policy-set failures
+ * are asserted but deliberately ignored.
+ *
+ * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER for an unknown enmType.
+ */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+    /*
+     * Convert the priority type to scheduling policies.
+     * (This is really just guess work.)
+     */
+    bool                           fSetExtended = false;
+    thread_extended_policy         Extended = { true };
+    bool                           fSetTimeContstraint = false;
+    thread_time_constraint_policy  TimeConstraint = { 0, 0, 0, true };
+    thread_precedence_policy       Precedence = { 0 };
+    switch (enmType)
+    {
+        case RTTHREADTYPE_INFREQUENT_POLLER:
+            Precedence.importance = 1;
+            break;
+
+        case RTTHREADTYPE_EMULATION:
+            Precedence.importance = 30;
+            break;
+
+        case RTTHREADTYPE_DEFAULT:
+            Precedence.importance = 31;
+            break;
+
+        case RTTHREADTYPE_MSG_PUMP:
+            Precedence.importance = 34;
+            break;
+
+        case RTTHREADTYPE_IO:
+            Precedence.importance = 98;
+            break;
+
+        case RTTHREADTYPE_TIMER:
+            Precedence.importance = 0x7fffffff;
+
+            /* Timer threads leave the timeshare scheduler entirely. */
+            fSetExtended = true;
+            Extended.timeshare = FALSE;
+
+            fSetTimeContstraint = true;
+            TimeConstraint.period = 0; /* not really true for a real timer thread, but we've really no idea. */
+            TimeConstraint.computation = rtDarwinAbsTimeFromNano(100000); /* 100 us */
+            TimeConstraint.constraint = rtDarwinAbsTimeFromNano(500000);  /* 500 us */
+            TimeConstraint.preemptible = FALSE;
+            break;
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", enmType));
+            return VERR_INVALID_PARAMETER;
+    }
+    RT_ASSERT_INTS_ON();
+
+    /*
+     * Do the actual modification.
+     */
+    /* The native thread handle is stored as the AVL core key. */
+    kern_return_t kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_PRECEDENCE_POLICY,
+                                         (thread_policy_t)&Precedence, THREAD_PRECEDENCE_POLICY_COUNT);
+    AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr)); NOREF(kr);
+
+    if (fSetExtended)
+    {
+        kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_EXTENDED_POLICY,
+                               (thread_policy_t)&Extended, THREAD_EXTENDED_POLICY_COUNT);
+        AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr));
+    }
+
+    if (fSetTimeContstraint)
+    {
+        kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_TIME_CONSTRAINT_POLICY,
+                               (thread_policy_t)&TimeConstraint, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
+        AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr));
+    }
+
+    return VINF_SUCCESS; /* ignore any errors for now */
+}
+
+
+/** Adopting foreign (non-IPRT-created) threads is not supported on Darwin. */
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+    RT_NOREF(pThread);
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/** Crude 1ms sleep to paper over the RTThreadWait/RTR0Term shutdown race;
+ *  see the @todo below for the proper fix. */
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+    RT_NOREF(pThread);
+    /** @todo fix RTThreadWait/RTR0Term race on darwin. */
+    RTThreadSleep(1);
+}
+
+
+/** No native per-thread resources to clean up on Darwin. */
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+    RT_NOREF(pThread);
+}
+
+
+/**
+ * Native kernel thread wrapper function.
+ *
+ * This will forward to rtThreadMain and do termination upon return.
+ *
+ * @param   pvArg       Pointer to the argument package.
+ * @param   Ignored     Wait result, which we ignore.
+ */
+static void rtThreadNativeMain(void *pvArg, wait_result_t Ignored)
+{
+    RT_NOREF(Ignored);
+    const thread_t Self = current_thread();
+    PRTTHREADINT pThread = (PRTTHREADINT)pvArg;
+
+    rtThreadMain(pThread, (RTNATIVETHREAD)Self, &pThread->szName[0]);
+
+    /* thread_terminate() is not expected to return; reaching the assertion
+       below means termination failed, which is fatal. */
+    kern_return_t kr = thread_terminate(Self);
+    AssertFatalMsgFailed(("kr=%d\n", kr));
+}
+
+
+/** Creates a native kernel thread running rtThreadNativeMain and returns its
+ *  handle via pNativeThread. */
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    thread_t NativeThread;
+    kern_return_t kr = kernel_thread_start(rtThreadNativeMain, pThreadInt, &NativeThread);
+    if (kr == KERN_SUCCESS)
+    {
+        *pNativeThread = (RTNATIVETHREAD)NativeThread;
+        /* Presumably drops the reference kernel_thread_start handed us; the
+           thread keeps running on its own — TODO confirm against XNU docs. */
+        thread_deallocate(NativeThread);
+        IPRT_DARWIN_RESTORE_EFL_AC();
+        return VINF_SUCCESS;
+    }
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return RTErrConvertFromMachKernReturn(kr);
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/threadpreempt-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/threadpreempt-r0drv-darwin.cpp
new file mode 100644
index 00000000..a82157d0
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/threadpreempt-r0drv-darwin.cpp
@@ -0,0 +1,203 @@
+/* $Id: threadpreempt-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Thread Preemption, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/cpuset.h>
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Per-CPU state for the preemption-disable hack: holding a Darwin spinlock
+ *  keeps the scheduler from preempting the CPU. */
+typedef struct RTDARWINPREEMPTHACK
+{
+    /** The spinlock we exploit for disabling preemption. */
+    lck_spin_t *pSpinLock;
+    /** The preemption count for this CPU, to guard against nested calls. */
+    uint32_t    cRecursion;
+} RTDARWINPREEMPTHACK;
+typedef RTDARWINPREEMPTHACK *PRTDARWINPREEMPTHACK;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static RTDARWINPREEMPTHACK g_aPreemptHacks[RTCPUSET_MAX_CPUS];
+
+
+/**
+ * Allocates the per-cpu spin locks used to disable preemption.
+ *
+ * Called by rtR0InitNative.
+ *
+ * @returns VINF_SUCCESS, or VERR_NO_MEMORY if a spinlock allocation fails.
+ */
+int rtThreadPreemptDarwinInit(void)
+{
+    Assert(g_pDarwinLockGroup);
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    for (size_t i = 0; i < RT_ELEMENTS(g_aPreemptHacks); i++)
+    {
+        g_aPreemptHacks[i].pSpinLock = lck_spin_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
+        if (!g_aPreemptHacks[i].pSpinLock)
+        {
+            /* Fix: restore EFLAGS.AC on the failure path too (was skipped). */
+            IPRT_DARWIN_RESTORE_EFL_AC();
+            return VERR_NO_MEMORY; /* (The caller will invoke rtThreadPreemptDarwinTerm) */
+        }
+    }
+    IPRT_DARWIN_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees the per-cpu spin locks used to disable preemption.
+ *
+ * Called by rtR0TermNative.  Safe after a partial init: only non-NULL
+ * entries are freed and each pointer is cleared afterwards.
+ */
+void rtThreadPreemptDarwinTerm(void)
+{
+    IPRT_DARWIN_SAVE_EFL_AC();
+
+    for (size_t i = 0; i < RT_ELEMENTS(g_aPreemptHacks); i++)
+        if (g_aPreemptHacks[i].pSpinLock)
+        {
+            lck_spin_free(g_aPreemptHacks[i].pSpinLock, g_pDarwinLockGroup);
+            g_aPreemptHacks[i].pSpinLock = NULL;
+        }
+
+    IPRT_DARWIN_RESTORE_EFL_AC();
+}
+
+
+/** Whether preemption is enabled for the current thread; only
+ *  NIL_RTTHREAD (i.e. "current thread") is supported. */
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+    RT_NOREF(hThread);
+    Assert(hThread == NIL_RTTHREAD);
+    return preemption_enabled();
+}
+
+
+/** Checks whether preemption is pending by peeking at the CPU's AST word
+ *  (resolved at init time via g_pfnR0DarwinAstPending); returns false when
+ *  the resolver is unavailable. */
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+    RT_NOREF(hThread);
+    if (!g_pfnR0DarwinAstPending)
+        return false;
+    uint32_t volatile *pfAstPending = g_pfnR0DarwinAstPending(); AssertPtr(pfAstPending);
+    uint32_t const fAstPending = *pfAstPending;
+
+    /* Sanity-check that only known AST bits are set (see osfmk/kern/ast.h). */
+    AssertMsg(!(fAstPending & UINT32_C(0xfffe0000)), ("%#x\n", fAstPending));
+    return (fAstPending & (AST_PREEMPT | AST_QUANTUM | AST_URGENT)) != 0;
+}
+
+
+/** RTThreadPreemptIsPending is only trustworthy when the AST-pending
+ *  resolver was located at init time. */
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+    /* yes, we think that RTThreadPreemptIsPending is reliable... */
+    return g_pfnR0DarwinAstPending != NULL;
+}
+
+
+/** The Darwin kernel is preemptive, so preemption is always possible. */
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+    /* yes, kernel preemption is possible. */
+    return true;
+}
+
+
+/** Disables preemption by taking a per-CPU Darwin spinlock (holding a
+ *  spinlock keeps the scheduler off this CPU); nesting is tracked via a
+ *  recursion counter.  Pair with RTThreadPreemptRestore. */
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    Assert(pState->u32Reserved == 0);
+    pState->u32Reserved = 42; /* marker checked by RTThreadPreemptRestore */
+
+    /*
+     * Disable to prevent preemption while we grab the per-cpu spin lock.
+     * Note! Only take the lock on the first call or we end up spinning for ever.
+     */
+    RTCCUINTREG fSavedFlags = ASMIntDisableFlags();
+    RTCPUID idCpu = RTMpCpuId();
+    /* NOTE(review): RT_UNLIKELY on an almost-always-true range check looks
+       inverted — confirm whether RT_LIKELY was intended. */
+    if (RT_UNLIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
+    {
+        Assert(g_aPreemptHacks[idCpu].cRecursion < UINT32_MAX / 2);
+        if (++g_aPreemptHacks[idCpu].cRecursion == 1)
+        {
+            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
+            if (pSpinLock)
+                lck_spin_lock(pSpinLock);
+            else
+                AssertFailed();
+        }
+    }
+    ASMSetFlags(fSavedFlags);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+}
+
+
+/** Re-enables preemption disabled by RTThreadPreemptDisable: drops the
+ *  per-CPU spinlock when the recursion count reaches zero. */
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    Assert(pState->u32Reserved == 42); /* must match RTThreadPreemptDisable */
+    pState->u32Reserved = 0;
+    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+
+    /* Preemption is still off here, so RTMpCpuId() is stable. */
+    RTCPUID idCpu = RTMpCpuId();
+    /* NOTE(review): RT_UNLIKELY on an almost-always-true range check looks
+       inverted — confirm whether RT_LIKELY was intended. */
+    if (RT_UNLIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
+    {
+        Assert(g_aPreemptHacks[idCpu].cRecursion > 0);
+        if (--g_aPreemptHacks[idCpu].cRecursion == 0)
+        {
+            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
+            if (pSpinLock)
+            {
+                IPRT_DARWIN_SAVE_EFL_AC();
+                lck_spin_unlock(pSpinLock);
+                IPRT_DARWIN_RESTORE_EFL_AC();
+            }
+            else
+                AssertFailed();
+        }
+    }
+}
+
+
+/** Approximates "in interrupt context" by checking whether interrupts are
+ *  disabled — see the @todo; a real implementation is still missing. */
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+    /** @todo Darwin: Implement RTThreadIsInInterrupt. Required for guest
+     *        additions! */
+    return !ASMIntAreEnabled();
+}
+
diff --git a/src/VBox/Runtime/r0drv/darwin/time-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/time-r0drv-darwin.cpp
new file mode 100644
index 00000000..de3b947b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/darwin/time-r0drv-darwin.cpp
@@ -0,0 +1,98 @@
+/* $Id: time-r0drv-darwin.cpp $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, Darwin.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_TIME
+#include "the-darwin-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/time.h>
+
+#include <iprt/asm.h>
+
+
+/** Returns the monotonic system time in nanoseconds.  On the first call it
+ *  caches whether mach_absolute_time() already ticks in nanoseconds
+ *  (timebase numer == denom == 1) so the conversion can be skipped. */
+DECLINLINE(uint64_t) rtTimeGetSystemNanoTS(void)
+{
+    /* -1 = not probed yet, 0 = conversion needed, 1 = abs time is in ns. */
+    static int8_t s_fSimple = -1;
+
+    /* first call: check if life is simple or not. */
+    if (s_fSimple < 0)
+    {
+        struct mach_timebase_info Info;
+        clock_timebase_info(&Info);
+        ASMAtomicXchgS8((int8_t * volatile)&s_fSimple, Info.denom == 1 && Info.numer == 1);
+    }
+
+    /* special case: absolute time is in nanoseconds */
+    if (s_fSimple)
+        return mach_absolute_time();
+
+    /* general case: let mach do the mult/div for us. */
+    uint64_t u64;
+    absolutetime_to_nanoseconds(mach_absolute_time(), &u64);
+    return u64;
+}
+
+
+/** Monotonic timestamp in nanoseconds. */
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+    return rtTimeGetSystemNanoTS();
+}
+
+
+/** Monotonic timestamp in milliseconds (nanosecond source divided down). */
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+    return rtTimeGetSystemNanoTS() / RT_NS_1MS;
+}
+
+
+/** Identical to RTTimeNanoTS on Darwin; there is only one monotonic source. */
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+    return rtTimeGetSystemNanoTS();
+}
+
+
+/** Identical to RTTimeMilliTS on Darwin; there is only one monotonic source. */
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+    return rtTimeGetSystemNanoTS() / RT_NS_1MS;
+}
+
+
+/** Fetches the current wall-clock (calendar) time into pTime and returns
+ *  pTime.  The clock_get_calendar_nanotime() out-parameter types changed in
+ *  the 10.6 SDK, hence the version #ifdef. */
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
+    uint32_t uSecs;
+    uint32_t uNanosecs;
+#else
+    clock_sec_t uSecs;
+    clock_nsec_t uNanosecs;
+#endif
+    clock_get_calendar_nanotime(&uSecs, &uNanosecs);
+    return RTTimeSpecSetNano(pTime, (uint64_t)uSecs * RT_NS_1SEC + uNanosecs);
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/Makefile.kup b/src/VBox/Runtime/r0drv/freebsd/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/freebsd/alloc-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/alloc-r0drv-freebsd.c
new file mode 100644
index 00000000..60294ed3
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/alloc-r0drv-freebsd.c
@@ -0,0 +1,185 @@
+/* $Id: alloc-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/param.h>
+
+#include "r0drv/alloc-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/* These two statements will define two globals and add initializers
+ and destructors that will be called at load/unload time (I think). */
+MALLOC_DEFINE(M_IPRTHEAP, "iprtheap", "IPRT - heap");
+MALLOC_DEFINE(M_IPRTCONT, "iprtcont", "IPRT - contiguous");
+
+
+/**
+ * Native allocation worker for the ring-0 RTMemAlloc family.
+ *
+ * @returns IPRT status code.
+ * @param   cb      Number of bytes requested by the caller (header excluded).
+ * @param   fFlags  RTMEMHDR_FLAG_XXX.
+ * @param   ppHdr   Where to return the allocation header; the usable memory
+ *                  follows directly after it.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+    size_t cbAllocated = cb;
+    PRTMEMHDR pHdr = NULL;
+
+#ifdef RT_ARCH_AMD64
+    /*
+     * Things are a bit more complicated on AMD64 for executable memory
+     * because we need to be in the ~2GB..~0 range for code.
+     */
+    if (fFlags & RTMEMHDR_FLAG_EXEC)
+    {
+        /* Executable any-context allocations are not supported here. */
+        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
+            return VERR_NOT_SUPPORTED;
+
+# ifdef USE_KMEM_ALLOC_PROT
+        pHdr = (PRTMEMHDR)kmem_alloc_prot(kernel_map, cb + sizeof(*pHdr),
+                                          VM_PROT_ALL, VM_PROT_ALL, KERNBASE);
+# else
+        vm_object_t pVmObject = NULL;
+        vm_offset_t Addr = KERNBASE;
+        /* Round the total (header + payload) up to whole pages. */
+        cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE);
+
+        pVmObject = vm_object_allocate(OBJT_DEFAULT, cbAllocated >> PAGE_SHIFT);
+        if (!pVmObject)
+            return VERR_NO_EXEC_MEMORY;
+
+        /* Addr contains a start address vm_map_find will start searching for suitable space at. */
+#if __FreeBSD_version >= 1000055
+        int rc = vm_map_find(kernel_map, pVmObject, 0, &Addr,
+                             cbAllocated, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
+#else
+        int rc = vm_map_find(kernel_map, pVmObject, 0, &Addr,
+                             cbAllocated, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
+#endif
+        if (rc == KERN_SUCCESS)
+        {
+            /* Wire the range so it cannot be paged out. */
+            rc = vm_map_wire(kernel_map, Addr, Addr + cbAllocated,
+                             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+            if (rc == KERN_SUCCESS)
+            {
+                pHdr = (PRTMEMHDR)Addr;
+
+                if (fFlags & RTMEMHDR_FLAG_ZEROED)
+                    bzero(pHdr, cbAllocated);
+            }
+            else
+                vm_map_remove(kernel_map,
+                              Addr,
+                              Addr + cbAllocated);
+        }
+        else
+            vm_object_deallocate(pVmObject);
+# endif
+    }
+    else
+#endif
+    {
+        /* Common case: malloc(9) with M_NOWAIT (never sleeps, may fail). */
+        pHdr = (PRTMEMHDR)malloc(cb + sizeof(RTMEMHDR), M_IPRTHEAP,
+                                 fFlags & RTMEMHDR_FLAG_ZEROED ? M_NOWAIT | M_ZERO : M_NOWAIT);
+    }
+
+    if (RT_UNLIKELY(!pHdr))
+        return VERR_NO_MEMORY;
+
+    /* Initialize the header that precedes the user memory. */
+    pHdr->u32Magic = RTMEMHDR_MAGIC;
+    pHdr->fFlags = fFlags;
+    pHdr->cb = cbAllocated;
+    pHdr->cbReq = cb;
+
+    *ppHdr = pHdr;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Native free worker; counterpart of rtR0MemAllocEx.
+ *
+ * @param   pHdr    Header of the allocation to free.
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+    /* Invalidate the magic to help catch double frees / use after free. */
+    pHdr->u32Magic += 1;
+
+#ifdef RT_ARCH_AMD64
+    /* Executable memory was mapped straight into kernel_map (see rtR0MemAllocEx). */
+    if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
+# ifdef USE_KMEM_ALLOC_PROT
+        kmem_free(kernel_map, (vm_offset_t)pHdr, pHdr->cb);
+# else
+        vm_map_remove(kernel_map, (vm_offset_t)pHdr, ((vm_offset_t)pHdr) + pHdr->cb);
+# endif
+    else
+#endif
+        free(pHdr, M_IPRTHEAP);
+}
+
+
+/**
+ * Allocates zeroed, page-aligned, physically contiguous memory below 4GB.
+ *
+ * @returns Virtual address of the allocation, NULL on failure.
+ * @param   pPhys   Where to return the physical address.
+ * @param   cb      Number of bytes to allocate.
+ */
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+    void *pv;
+
+    /*
+     * Validate input.
+     */
+    AssertPtr(pPhys);
+    Assert(cb > 0);
+
+    /*
+     * This API works in pages, so no need to do any size aligning.
+     */
+    pv = contigmalloc(cb, /* size */
+                      M_IPRTCONT, /* type */
+                      M_NOWAIT | M_ZERO, /* flags */
+                      0, /* lowest physical address*/
+                      _4G-1, /* highest physical address */
+                      PAGE_SIZE, /* alignment. */
+                      0); /* boundary */
+    if (pv)
+    {
+        /* Both the virtual and physical addresses must be page aligned. */
+        Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
+        *pPhys = vtophys(pv);
+        Assert(!(*pPhys & PAGE_OFFSET_MASK));
+    }
+    return pv;
+}
+
+
+/**
+ * Frees memory allocated by RTMemContAlloc.
+ *
+ * @param   pv  Pointer returned by RTMemContAlloc (NULL is ignored).
+ * @param   cb  Size passed to RTMemContAlloc.
+ */
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+    if (pv)
+    {
+        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
+        contigfree(pv, cb, M_IPRTCONT);
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/assert-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/assert-r0drv-freebsd.c
new file mode 100644
index 00000000..b92198c9
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/assert-r0drv-freebsd.c
@@ -0,0 +1,70 @@
+/* $Id: assert-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/string.h>
+#include <iprt/stdarg.h>
+
+#include "internal/assert.h"
+
+
+/**
+ * Native assertion worker printing the first, fixed part of the message.
+ *
+ * @param   pszExpr     The failed expression.
+ * @param   uLine       Source line number.
+ * @param   pszFile     Source file name.
+ * @param   pszFunction Function name.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+    /* %u, not %d: uLine is unsigned. */
+    printf("\r\n!!Assertion Failed!!\r\n"
+           "Expression: %s\r\n"
+           "Location : %s(%u) %s\r\n",
+           pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+/**
+ * Native assertion worker printing the custom (printf style) message part.
+ *
+ * @param   fInitial    Whether this is the first message for the assertion
+ *                      (unused here).
+ * @param   pszFormat   IPRT format string.
+ * @param   va          Format arguments.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+    char szMsg[256];
+
+    /* Format into a bounded buffer and force termination before printing. */
+    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
+    szMsg[sizeof(szMsg) - 1] = '\0';
+    printf("%s", szMsg);
+
+    NOREF(fInitial);
+}
+
+
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+    /* Intentionally a no-op on FreeBSD for now. */
+    /** @todo implement RTR0AssertPanicSystem. */
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/initterm-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/initterm-r0drv-freebsd.c
new file mode 100644
index 00000000..8144bf15
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/initterm-r0drv-freebsd.c
@@ -0,0 +1,53 @@
+/* $Id: initterm-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Initialization & Termination, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/errcore.h>
+
+#include "internal/initterm.h"
+
+
+/** Native ring-0 init; FreeBSD needs no per-module setup. */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+    /* nothing to do */
+    return VINF_SUCCESS;
+}
+
+
+/** Native ring-0 termination; counterpart of rtR0InitNative. */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+    /* nothing to undo */
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
new file mode 100644
index 00000000..6561e1bc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
@@ -0,0 +1,905 @@
+/* $Id: memobj-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ * Copyright (c) 2011 Andriy Gapon <avg@FreeBSD.org>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/memobj.h>
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The FreeBSD version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJFREEBSD
+{
+    /** The core structure. */
+    RTR0MEMOBJINTERNAL Core;
+    /** The VM object associated with the allocation.
+     * NOTE(review): appears to stay unset for objects created without an
+     * allocation (e.g. rtR0MemObjNativeEnterPhys) -- confirm before use. */
+    vm_object_t pObject;
+} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
+
+
+MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
+
+
+/**
+ * Gets the virtual memory map the specified object is mapped into.
+ *
+ * @returns VM map handle on success, NULL if no map.
+ * @param   pMem    The memory object.
+ */
+static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
+{
+    switch (pMem->enmType)
+    {
+        /* Kernel allocations always live in kernel_map. */
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+            return kernel_map;
+
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            return NULL; /* pretend these have no mapping atm. */
+
+        /* For the remaining types pick kernel_map or the owning process' map. */
+        case RTR0MEMOBJTYPE_LOCK:
+            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;
+
+        case RTR0MEMOBJTYPE_MAPPING:
+            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;
+
+        default:
+            return NULL;
+    }
+}
+
+
+/**
+ * Frees the native backing of a memory object.
+ *
+ * @returns IPRT status code.
+ * @param   pMem    The memory object to free.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
+    int rc;
+
+    switch (pMemFreeBSD->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+            /* These were mapped into kernel_map by the alloc helper. */
+            rc = vm_map_remove(kernel_map,
+                               (vm_offset_t)pMemFreeBSD->Core.pv,
+                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
+            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
+            break;
+
+        case RTR0MEMOBJTYPE_LOCK:
+        {
+            /* Undo the vm_map_wire done when locking. */
+            vm_map_t pMap = kernel_map;
+
+            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
+
+            rc = vm_map_unwire(pMap,
+                               (vm_offset_t)pMemFreeBSD->Core.pv,
+                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
+                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
+            break;
+        }
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+        {
+            vm_map_t pMap = kernel_map;
+            if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
+                pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
+            rc = vm_map_remove(pMap,
+                               (vm_offset_t)pMemFreeBSD->Core.pv,
+                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
+            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
+            break;
+        }
+
+        case RTR0MEMOBJTYPE_MAPPING:
+        {
+            vm_map_t pMap = kernel_map;
+
+            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
+                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
+            rc = vm_map_remove(pMap,
+                               (vm_offset_t)pMemFreeBSD->Core.pv,
+                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
+            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
+            break;
+        }
+
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            /* Unwire every page in the object, then drop the object reference. */
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
+#else
+            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
+#endif
+            vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
+#if __FreeBSD_version < 1000000
+            vm_page_lock_queues();
+#endif
+            /* Assign, don't re-declare: the declaration above must not be shadowed. */
+            for (pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
+                 pPage != NULL;
+                 pPage = vm_page_next(pPage))
+            {
+                vm_page_unwire(pPage, 0);
+            }
+#if __FreeBSD_version < 1000000
+            vm_page_unlock_queues();
+#endif
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
+#else
+            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
+#endif
+            vm_object_deallocate(pMemFreeBSD->pObject);
+            break;
+        }
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
+            return VERR_INTERNAL_ERROR;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker allocating a physically contiguous run of pages into a VM object.
+ *
+ * @returns Pointer to the first allocated page on success, NULL on failure.
+ * @param   pObject         The VM object to insert the pages into.
+ * @param   iPIndex         Page index in the object at which to insert.
+ * @param   cPages          Number of pages to allocate.
+ * @param   VmPhysAddrHigh  Highest acceptable physical address.
+ * @param   uAlignment      Required physical alignment in bytes.
+ * @param   fWire           Whether the pages should be wired.
+ */
+static vm_page_t rtR0MemObjFreeBSDContigPhysAllocHelper(vm_object_t pObject, vm_pindex_t iPIndex,
+                                                        u_long cPages, vm_paddr_t VmPhysAddrHigh,
+                                                        u_long uAlignment, bool fWire)
+{
+    vm_page_t pPages;
+    int cTries = 0;
+
+#if __FreeBSD_version > 1000000
+    int fFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
+    if (fWire)
+        fFlags |= VM_ALLOC_WIRED;
+
+    /* Try twice at most, growing the page cache between attempts. */
+    while (cTries <= 1)
+    {
+#if __FreeBSD_version >= 1000030
+        VM_OBJECT_WLOCK(pObject);
+#else
+        VM_OBJECT_LOCK(pObject);
+#endif
+        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0,
+                                      VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
+#if __FreeBSD_version >= 1000030
+        VM_OBJECT_WUNLOCK(pObject);
+#else
+        VM_OBJECT_UNLOCK(pObject);
+#endif
+        if (pPages)
+            break;
+        vm_pageout_grow_cache(cTries, 0, VmPhysAddrHigh);
+        cTries++;
+    }
+
+    return pPages;
+#else
+    /* Old kernels: allocate raw physical pages first... */
+    while (cTries <= 1)
+    {
+        pPages = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
+        if (pPages)
+            break;
+        vm_contig_grow_cache(cTries, 0, VmPhysAddrHigh);
+        cTries++;
+    }
+
+    if (!pPages)
+        return pPages;
+#if __FreeBSD_version >= 1000030
+    VM_OBJECT_WLOCK(pObject);
+#else
+    VM_OBJECT_LOCK(pObject);
+#endif
+    /* ...then insert them into the object and wire them manually. */
+    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
+    {
+        vm_page_t pPage = pPages + iPage;
+        vm_page_insert(pPage, pObject, iPIndex + iPage);
+        pPage->valid = VM_PAGE_BITS_ALL;
+        if (fWire)
+        {
+            pPage->wire_count = 1;
+            atomic_add_int(&cnt.v_wire_count, 1);
+        }
+    }
+#if __FreeBSD_version >= 1000030
+    VM_OBJECT_WUNLOCK(pObject);
+#else
+    VM_OBJECT_UNLOCK(pObject);
+#endif
+    return pPages;
+#endif
+}
+
+/**
+ * Worker filling a VM object with physical pages, contiguous or not.
+ *
+ * @returns VINF_SUCCESS or rcNoMem.
+ * @param   pObject         The VM object to fill.
+ * @param   cPages          Number of pages.
+ * @param   VmPhysAddrHigh  Highest acceptable physical address.
+ * @param   uAlignment      Required physical alignment in bytes.
+ * @param   fContiguous     Whether the pages must be physically contiguous.
+ * @param   fWire           Whether the pages should be wired.
+ * @param   rcNoMem         Status to return on allocation failure.
+ */
+static int rtR0MemObjFreeBSDPhysAllocHelper(vm_object_t pObject, u_long cPages,
+                                            vm_paddr_t VmPhysAddrHigh, u_long uAlignment,
+                                            bool fContiguous, bool fWire, int rcNoMem)
+{
+    if (fContiguous)
+    {
+        /* One contiguous run covering the whole object. */
+        if (rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, 0, cPages, VmPhysAddrHigh,
+                                                   uAlignment, fWire) != NULL)
+            return VINF_SUCCESS;
+        return rcNoMem;
+    }
+
+    /* Non-contiguous: allocate page by page, rolling back on failure. */
+    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
+    {
+        vm_page_t pPage = rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, iPage, 1, VmPhysAddrHigh,
+                                                                 uAlignment, fWire);
+        if (!pPage)
+        {
+            /* Free all allocated pages */
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WLOCK(pObject);
+#else
+            VM_OBJECT_LOCK(pObject);
+#endif
+            while (iPage-- > 0)
+            {
+                pPage = vm_page_lookup(pObject, iPage);
+#if __FreeBSD_version < 1000000
+                vm_page_lock_queues();
+#endif
+                if (fWire)
+                    vm_page_unwire(pPage, 0);
+                vm_page_free(pPage);
+#if __FreeBSD_version < 1000000
+                vm_page_unlock_queues();
+#endif
+            }
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WUNLOCK(pObject);
+#else
+            VM_OBJECT_UNLOCK(pObject);
+#endif
+            return rcNoMem;
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+/**
+ * Common worker for the page/low/cont allocators: creates a VM object, maps it
+ * into kernel_map, backs it with physical pages and wires the range.
+ *
+ * @returns IPRT status code.
+ * @param   pMemFreeBSD     The memory object (Core.cb must be set); NOT deleted
+ *                          here on failure -- the caller owns and deletes it.
+ * @param   fExecutable     Whether the mapping must be executable.
+ * @param   VmPhysAddrHigh  Highest acceptable physical address.
+ * @param   fContiguous     Whether physically contiguous backing is required.
+ * @param   rcNoMem         Status to return on allocation failure.
+ */
+static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
+                                        vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
+{
+    vm_offset_t MapAddress = vm_map_min(kernel_map);
+    size_t cPages = atop(pMemFreeBSD->Core.cb);
+    int rc;
+
+    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);
+
+    /* No additional object reference for auto-deallocation upon unmapping. */
+#if __FreeBSD_version >= 1000055
+    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
+                     &MapAddress, pMemFreeBSD->Core.cb, 0, VMFS_ANY_SPACE,
+                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
+#else
+    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
+                     &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
+                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
+#endif
+
+    if (rc == KERN_SUCCESS)
+    {
+        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
+                                              VmPhysAddrHigh, PAGE_SIZE, fContiguous,
+                                              false, rcNoMem);
+        if (RT_SUCCESS(rc))
+        {
+            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
+                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+
+            /* Store start address */
+            pMemFreeBSD->Core.pv = (void *)MapAddress;
+            return VINF_SUCCESS;
+        }
+
+        vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
+    }
+    else
+    {
+        rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
+        vm_object_deallocate(pMemFreeBSD->pObject);
+    }
+
+    /* Do NOT call rtR0MemObjDelete() here: all callers delete the memory
+       object themselves on failure, so deleting it here too would be a
+       double free. */
+    return rc;
+}
+/**
+ * Allocates page-aligned kernel memory with no physical address constraint.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
+                                                                       RTR0MEMOBJTYPE_PAGE, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    /* Any physical address is acceptable for the PAGE type. */
+    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, ~(vm_paddr_t)0, false, VERR_NO_MEMORY);
+    if (RT_FAILURE(rc))
+    {
+        rtR0MemObjDelete(&pMemFreeBSD->Core);
+        return rc;
+    }
+
+    *ppMem = &pMemFreeBSD->Core;
+    return rc;
+}
+
+
+/**
+ * Allocates page-aligned kernel memory backed by pages below 4GB.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
+                                                                       RTR0MEMOBJTYPE_LOW, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    /* Constrain backing pages to the low 4GB; contiguity not required. */
+    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false, VERR_NO_LOW_MEMORY);
+    if (RT_FAILURE(rc))
+    {
+        rtR0MemObjDelete(&pMemFreeBSD->Core);
+        return rc;
+    }
+
+    *ppMem = &pMemFreeBSD->Core;
+    return rc;
+}
+
+
+/**
+ * Allocates physically contiguous kernel memory below 4GB.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
+                                                                       RTR0MEMOBJTYPE_CONT, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    /* Contiguous backing below 4GB. */
+    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true, VERR_NO_CONT_MEMORY);
+    if (RT_FAILURE(rc))
+    {
+        rtR0MemObjDelete(&pMemFreeBSD->Core);
+        return rc;
+    }
+
+    /* Record the physical base of the contiguous range. */
+    pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
+    *ppMem = &pMemFreeBSD->Core;
+    return rc;
+}
+
+
+/**
+ * Worker for the PHYS and PHYS_NC allocators: allocates (unmapped) physical
+ * pages into a new VM object.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the memory object on success.
+ * @param   enmType     RTR0MEMOBJTYPE_PHYS or RTR0MEMOBJTYPE_PHYS_NC.
+ * @param   cb          Number of bytes.
+ * @param   PhysHighest Highest acceptable physical address, NIL for no limit.
+ * @param   uAlignment  Required physical alignment in bytes.
+ * @param   fContiguous Whether the pages must be physically contiguous.
+ * @param   rcNoMem     Status to return on allocation failure.
+ */
+static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
+                                           size_t cb,
+                                           RTHCPHYS PhysHighest, size_t uAlignment,
+                                           bool fContiguous, int rcNoMem)
+{
+    uint32_t cPages = atop(cb);
+    vm_paddr_t VmPhysAddrHigh;
+
+    /* create the object. */
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
+                                                                       enmType, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));
+
+    if (PhysHighest != NIL_RTHCPHYS)
+        VmPhysAddrHigh = PhysHighest;
+    else
+        VmPhysAddrHigh = ~(vm_paddr_t)0;
+
+    /* Wired pages; not mapped into any address space here. */
+    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
+                                              uAlignment, fContiguous, true, rcNoMem);
+    if (RT_SUCCESS(rc))
+    {
+        if (fContiguous)
+        {
+            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
+            /* Look up the first page to record the physical base address. */
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
+#else
+            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
+#endif
+            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
+#else
+            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
+#endif
+            pMemFreeBSD->Core.u.Phys.fAllocated = true;
+        }
+
+        *ppMem = &pMemFreeBSD->Core;
+    }
+    else
+    {
+        vm_object_deallocate(pMemFreeBSD->pObject);
+        rtR0MemObjDelete(&pMemFreeBSD->Core);
+    }
+
+    return rc;
+}
+
+
+/** Allocates contiguous physical pages (PHYS type). */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true, VERR_NO_MEMORY);
+}
+
+
+/** Allocates non-contiguous physical pages (PHYS_NC type), page aligned. */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false, VERR_NO_PHYS_MEMORY);
+}
+
+
+/**
+ * Wraps an existing physical address range in a memory object without
+ * allocating or mapping anything.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+    /* Only the default cache policy is supported here. */
+    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
+
+    /* create the object. */
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    /* there is no allocation here, it needs to be mapped somewhere first. */
+    pMemFreeBSD->Core.u.Phys.fAllocated = false;
+    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
+    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
+    *ppMem = &pMemFreeBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker locking the memory in either kernel or user maps.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the memory object on success.
+ * @param   pVmMap      The map containing the range to wire.
+ * @param   AddrStart   Start of the range.
+ * @param   cb          Size of the range.
+ * @param   fAccess     Requested access (currently unused).
+ * @param   R0Process   Owning process, NIL_RTR0PROCESS for kernel memory.
+ * @param   fFlags      VM_MAP_WIRE_XXX flags for vm_map_wire.
+ */
+static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
+                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
+                                     RTR0PROCESS R0Process, int fFlags)
+{
+    int rc;
+    NOREF(fAccess);
+
+    /* create the object. */
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    /*
+     * We could've used vslock here, but we don't wish to be subject to
+     * resource usage restrictions, so we'll call vm_map_wire directly.
+     */
+    rc = vm_map_wire(pVmMap, /* the map */
+                     AddrStart, /* start */
+                     AddrStart + cb, /* end */
+                     fFlags); /* flags */
+    if (rc == KERN_SUCCESS)
+    {
+        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
+        *ppMem = &pMemFreeBSD->Core;
+        return VINF_SUCCESS;
+    }
+    rtR0MemObjDelete(&pMemFreeBSD->Core);
+    return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
+}
+
+
+/** Locks (wires) a user-mode address range of the given process. */
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+    return rtR0MemObjNativeLockInMap(ppMem,
+                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
+                                     (vm_offset_t)R3Ptr,
+                                     cb,
+                                     fAccess,
+                                     R0Process,
+                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+}
+
+
+/** Locks (wires) a kernel address range. */
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+    return rtR0MemObjNativeLockInMap(ppMem,
+                                     kernel_map,
+                                     (vm_offset_t)pv,
+                                     cb,
+                                     fAccess,
+                                     NIL_RTR0PROCESS,
+                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+}
+
+
+/**
+ * Worker for the two virtual address space reservers.
+ *
+ * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the memory object on success.
+ * @param   pvFixed     Fixed address to reserve at, (void *)-1 for any.
+ * @param   cb          Number of bytes to reserve.
+ * @param   uAlignment  Alignment; only up to PAGE_SIZE is supported.
+ * @param   R0Process   Owning process, NIL_RTR0PROCESS for the kernel.
+ * @param   pMap        The map to reserve in.
+ */
+static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
+{
+    int rc;
+
+    /*
+     * The pvFixed address range must be within the VM space when specified.
+     */
+    if (pvFixed != (void *)-1
+        && ((vm_offset_t)pvFixed < vm_map_min(pMap)
+            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
+        return VERR_INVALID_PARAMETER;
+
+    /*
+     * Check that the specified alignment is supported.
+     */
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Create the object.
+     */
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    vm_offset_t MapAddress = pvFixed != (void *)-1
+                           ? (vm_offset_t)pvFixed
+                           : vm_map_min(pMap);
+    /* For a fixed address, clear out whatever is mapped there first. */
+    if (pvFixed != (void *)-1)
+        vm_map_remove(pMap,
+                      MapAddress,
+                      MapAddress + cb);
+
+    /* Reserve with no backing object and no access rights. */
+    rc = vm_map_find(pMap, /* map */
+                     NULL, /* object */
+                     0, /* offset */
+                     &MapAddress, /* addr (IN/OUT) */
+                     cb, /* length */
+#if __FreeBSD_version >= 1000055
+                     0, /* max addr */
+#endif
+                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
+                     /* find_space */
+                     VM_PROT_NONE, /* protection */
+                     VM_PROT_ALL, /* max(_prot) ?? */
+                     0); /* cow (copy-on-write) */
+    if (rc == KERN_SUCCESS)
+    {
+        /* User reservations are shared with child processes on fork. */
+        if (R0Process != NIL_RTR0PROCESS)
+        {
+            rc = vm_map_inherit(pMap,
+                                MapAddress,
+                                MapAddress + cb,
+                                VM_INHERIT_SHARE);
+            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
+        }
+        pMemFreeBSD->Core.pv = (void *)MapAddress;
+        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
+        *ppMem = &pMemFreeBSD->Core;
+        return VINF_SUCCESS;
+    }
+
+    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
+    rtR0MemObjDelete(&pMemFreeBSD->Core);
+    return rc;
+
+}
+
+
+/** Reserves kernel virtual address space. */
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
+}
+
+
+/** Reserves virtual address space in the given process. */
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
+{
+    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
+                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
+}
+
+
+/**
+ * Maps (part of) an existing memory object into the kernel address space.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the mapping object on success.
+ * @param   pMemToMap   The object to map.
+ * @param   pvFixed     Must be (void *)-1; fixed addresses unsupported.
+ * @param   uAlignment  Alignment; only up to PAGE_SIZE is supported.
+ * @param   fProt       RTMEM_PROT_XXX protection for the mapping.
+ * @param   offSub      Offset into the object to start mapping at.
+ * @param   cbSub       Size to map; 0 means "to the end of the object".
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+                                          unsigned fProt, size_t offSub, size_t cbSub)
+{
+// AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
+    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+
+    /*
+     * Check that the specified alignment is supported.
+     */
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    int rc;
+    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
+
+    /* calc protection */
+    vm_prot_t ProtectionFlags = 0;
+    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
+        ProtectionFlags = VM_PROT_NONE;
+    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
+        ProtectionFlags |= VM_PROT_READ;
+    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
+        ProtectionFlags |= VM_PROT_WRITE;
+    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
+        ProtectionFlags |= VM_PROT_EXECUTE;
+
+    vm_offset_t Addr = vm_map_min(kernel_map);
+    if (cbSub == 0)
+        cbSub = pMemToMap->cb - offSub;
+
+    /* Extra reference so unmapping the new range won't kill the object. */
+    vm_object_reference(pMemToMapFreeBSD->pObject);
+    rc = vm_map_find(kernel_map, /* Map to insert the object in */
+                     pMemToMapFreeBSD->pObject, /* Object to map */
+                     offSub, /* Start offset in the object */
+                     &Addr, /* Start address IN/OUT */
+                     cbSub, /* Size of the mapping */
+#if __FreeBSD_version >= 1000055
+                     0, /* Upper bound of mapping */
+#endif
+                     VMFS_ANY_SPACE, /* Whether a suitable address should be searched for first */
+                     ProtectionFlags, /* protection flags */
+                     VM_PROT_ALL, /* Maximum protection flags */
+                     0); /* copy-on-write and similar flags */
+
+    if (rc == KERN_SUCCESS)
+    {
+        rc = vm_map_wire(kernel_map, Addr, Addr + cbSub, VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
+        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
+
+        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
+                                                                           RTR0MEMOBJTYPE_MAPPING,
+                                                                           (void *)Addr,
+                                                                           cbSub);
+        if (pMemFreeBSD)
+        {
+            Assert((vm_offset_t)pMemFreeBSD->Core.pv == Addr);
+            pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+            *ppMem = &pMemFreeBSD->Core;
+            return VINF_SUCCESS;
+        }
+        /* Out of memory for the tracking object: undo the mapping. */
+        rc = vm_map_remove(kernel_map, Addr, Addr + cbSub);
+        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
+    }
+    else
+        vm_object_deallocate(pMemToMapFreeBSD->pObject);
+
+    return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Maps an existing memory object into the calling user process.
+ *
+ * @returns VINF_SUCCESS, VERR_NOT_SUPPORTED for foreign processes or
+ *          alignments above PAGE_SIZE, VERR_NO_MEMORY on any mapping failure.
+ * @param   ppMem       Where to store the new mapping object on success.
+ * @param   pMemToMap   The object to map (carries the backing VM object).
+ * @param   R3PtrFixed  Fixed ring-3 address, or (RTR3PTR)-1 to pick one.
+ * @param   uAlignment  Requested alignment; only <= PAGE_SIZE is supported.
+ * @param   fProt       RTMEM_PROT_XXX flags for the new mapping.
+ * @param   R0Process   Target process; must be the calling process.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
+                                        unsigned fProt, RTR0PROCESS R0Process)
+{
+    /*
+     * Check for unsupported stuff.
+     */
+    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    int rc;
+    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
+    struct proc *pProc = (struct proc *)R0Process;
+    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
+
+    /* calc protection */
+    vm_prot_t ProtectionFlags = 0;
+    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
+        ProtectionFlags = VM_PROT_NONE;
+    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
+        ProtectionFlags |= VM_PROT_READ;
+    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
+        ProtectionFlags |= VM_PROT_WRITE;
+    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
+        ProtectionFlags |= VM_PROT_EXECUTE;
+
+    /* calc mapping address: start searching above the data segment limit. */
+    vm_offset_t AddrR3;
+    if (R3PtrFixed == (RTR3PTR)-1)
+    {
+        /** @todo is this needed?. */
+        PROC_LOCK(pProc);
+        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
+        PROC_UNLOCK(pProc);
+    }
+    else
+        AddrR3 = (vm_offset_t)R3PtrFixed;
+
+    /* Insert the pObject in the map. The reference is consumed by the map on
+       success and must be dropped manually on failure (see below). */
+    vm_object_reference(pMemToMapFreeBSD->pObject);
+    rc = vm_map_find(pProcMap,                /* Map to insert the object in */
+                     pMemToMapFreeBSD->pObject, /* Object to map */
+                     0,                       /* Start offset in the object */
+                     &AddrR3,                 /* Start address IN/OUT */
+                     pMemToMap->cb,           /* Size of the mapping */
+#if __FreeBSD_version >= 1000055
+                     0,                       /* Upper bound of the mapping */
+#endif
+                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
+                                              /* Whether a suitable address should be searched for first */
+                     ProtectionFlags,         /* protection flags */
+                     VM_PROT_ALL,             /* Maximum protection flags */
+                     0);                      /* copy-on-write and similar flags */
+
+    if (rc == KERN_SUCCESS)
+    {
+        /* Wire the pages so they cannot be paged out; failures here are only
+           asserted, the mapping itself is still considered established. */
+        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
+        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
+
+        /* Share the mapping with child processes on fork. */
+        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
+        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
+
+        /*
+         * Create a mapping object for it.
+         */
+        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
+                                                                           RTR0MEMOBJTYPE_MAPPING,
+                                                                           (void *)AddrR3,
+                                                                           pMemToMap->cb);
+        if (pMemFreeBSD)
+        {
+            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
+            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
+            *ppMem = &pMemFreeBSD->Core;
+            return VINF_SUCCESS;
+        }
+
+        /* Out of memory for the handle: undo the mapping. */
+        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
+        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
+    }
+    else
+        vm_object_deallocate(pMemToMapFreeBSD->pObject);
+
+    return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Changes the page protection of a sub-range of a memory object.
+ *
+ * @returns VINF_SUCCESS, or VERR_NOT_SUPPORTED if the object has no map or
+ *          vm_map_protect() refuses the request.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+    /* Resolve the VM map the object lives in; not every object type has one. */
+    vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem);
+    if (!pVmMap)
+        return VERR_NOT_SUPPORTED;
+
+    /* Translate the IPRT protection bits into VM protection bits. */
+    vm_prot_t fVmProt = VM_PROT_NONE;
+    if (fProt & RTMEM_PROT_READ)
+        fVmProt |= VM_PROT_READ;
+    if (fProt & RTMEM_PROT_WRITE)
+        fVmProt |= VM_PROT_WRITE;
+    if (fProt & RTMEM_PROT_EXEC)
+        fVmProt |= VM_PROT_EXECUTE;
+
+    vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub;
+    int krc = vm_map_protect(pVmMap, AddrStart, AddrStart + cbSub, fVmProt, FALSE);
+    return krc == KERN_SUCCESS ? VINF_SUCCESS : VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Returns the host physical address of a page within the memory object.
+ *
+ * @returns The physical address, or NIL_RTHCPHYS if it cannot be determined
+ *          (foreign-process locks, reserved virtual ranges).
+ * @param   pMem    The memory object.
+ * @param   iPage   Zero-based page index within the object.
+ */
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
+
+    switch (pMemFreeBSD->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_LOCK:
+        {
+            if (   pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
+                && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
+            {
+                /* later */
+                return NIL_RTHCPHYS;
+            }
+
+            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
+
+            /* Fix: kernel-locked memory has no owning process; the old code
+               dereferenced a NULL proc pointer here.  Resolve it via the
+               kernel's direct translation instead. */
+            if (pMemFreeBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
+                return vtophys(pb);
+
+            struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
+            struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
+            pmap_t pPhysicalMap = vm_map_pmap(pProcMap);
+
+            return pmap_extract(pPhysicalMap, pb);
+        }
+
+        case RTR0MEMOBJTYPE_MAPPING:
+        {
+            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + ptoa(iPage);
+
+            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
+            {
+                /* User mapping: extract from the owning process' pmap. */
+                struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
+                struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
+                pmap_t pPhysicalMap = vm_map_pmap(pProcMap);
+
+                return pmap_extract(pPhysicalMap, pb);
+            }
+            /* Kernel mapping: direct virtual-to-physical translation. */
+            return vtophys(pb);
+        }
+
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            /* Page-list backed objects: look the page up in the VM object. */
+            RTHCPHYS addr;
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
+#else
+            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
+#endif
+            addr = VM_PAGE_TO_PHYS(vm_page_lookup(pMemFreeBSD->pObject, iPage));
+#if __FreeBSD_version >= 1000030
+            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
+#else
+            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
+#endif
+            return addr;
+        }
+
+        /* Fix: the PHYS and CONT cases read each other's union members
+           (worked only because both phys fields share the same offset). */
+        case RTR0MEMOBJTYPE_PHYS:
+            return pMemFreeBSD->Core.u.Phys.PhysBase + ptoa(iPage);
+
+        case RTR0MEMOBJTYPE_CONT:
+            return pMemFreeBSD->Core.u.Cont.Phys + ptoa(iPage);
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+        default:
+            return NIL_RTHCPHYS;
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/memuserkernel-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/memuserkernel-r0drv-freebsd.c
new file mode 100644
index 00000000..a1b3d295
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/memuserkernel-r0drv-freebsd.c
@@ -0,0 +1,83 @@
+/* $Id: memuserkernel-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/mem.h>
+#include <iprt/errcore.h>
+
+
+/**
+ * Copies memory from user space into a kernel buffer.
+ * Returns VERR_ACCESS_DENIED when copyin() faults on the user address.
+ */
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+    if (RT_UNLIKELY(copyin((const void *)R3PtrSrc, pvDst, cb) != 0))
+        return VERR_ACCESS_DENIED;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Copies memory from a kernel buffer out to user space.
+ * Returns VERR_ACCESS_DENIED when copyout() faults on the user address.
+ */
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+    if (RT_UNLIKELY(copyout(pvSrc, (void *)R3PtrDst, cb) != 0))
+        return VERR_ACCESS_DENIED;
+    return VINF_SUCCESS;
+}
+
+
+/** Checks whether the address lies in the user address range. */
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
+    return R3Ptr < VM_MAXUSER_ADDRESS;
+}
+
+
+/** Checks whether the address lies at or above the user/kernel boundary. */
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+    return (uintptr_t)pv >= VM_MAXUSER_ADDRESS;
+}
+
+
+/** FreeBSD keeps kernel and user ranges disjoint, so always true. */
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+    return true;
+}
+
+
+/** Safe kernel-to-kernel copy; not implemented on FreeBSD. */
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/** Safe kernel-to-kernel copy; not implemented on FreeBSD. */
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/mp-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/mp-r0drv-freebsd.c
new file mode 100644
index 00000000..681fbbbe
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/mp-r0drv-freebsd.c
@@ -0,0 +1,308 @@
+/* $Id: mp-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/mp.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/cpuset.h>
+#include "r0drv/mp-r0drv.h"
+
+
+/** Returns the id of the CPU the caller is running on (FreeBSD curcpu). */
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+    return curcpu;
+}
+
+
+/** Returns the current CPU's set index; identical to its id on FreeBSD. */
+RTDECL(int) RTMpCurSetIndex(void)
+{
+    return curcpu;
+}
+
+
+/** Stores the current CPU id in *pidCpu and returns the (identical) set index. */
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+    return *pidCpu = curcpu;
+}
+
+
+/**
+ * Converts a CPU id to a set index (identity mapping on FreeBSD).
+ * @returns The set index, or -1 when the id exceeds the set capacity or the
+ *          highest CPU id on this system.
+ */
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+    if (idCpu >= RTCPUSET_MAX_CPUS || idCpu > mp_maxid)
+        return -1;
+    return (int)idCpu;
+}
+
+
+/**
+ * Converts a set index back to a CPU id (identity mapping on FreeBSD).
+ * The unsigned cast also rejects negative indices.
+ */
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+    if ((unsigned)iCpu > mp_maxid)
+        return NIL_RTCPUID;
+    return (RTCPUID)iCpu;
+}
+
+
+/** Returns the highest possible CPU id (FreeBSD mp_maxid). */
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+    return mp_maxid;
+}
+
+
+/** A CPU id is possible when it does not exceed mp_maxid. */
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+    return idCpu <= mp_maxid;
+}
+
+
+/**
+ * Fills *pSet with every possible CPU id on the system.
+ * @returns pSet.
+ */
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+    RTCPUID idCpu = RTMpGetMaxCpuId();
+
+    RTCpuSetEmpty(pSet);
+    for (;;)
+    {
+        if (RTMpIsCpuPossible(idCpu))
+            RTCpuSetAdd(pSet, idCpu);
+        if (idCpu-- == 0)
+            break;
+    }
+    return pSet;
+}
+
+
+/** Returns the number of possible CPUs.
+ * NOTE(review): this counts ids 0..mp_maxid and may overcount if CPU ids are
+ * sparse (CPU_ABSENT holes) — confirm against the callers' expectations. */
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+    return mp_maxid + 1;
+}
+
+
+/** Core count; approximated with the possible-CPU count (no SMT awareness here). */
+RTDECL(RTCPUID) RTMpGetCoreCount(void)
+{
+    return mp_maxid + 1;
+}
+
+/** A CPU is online when its id is in range and it is not marked absent. */
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+    return idCpu <= mp_maxid
+        && !CPU_ABSENT(idCpu);
+}
+
+
+/**
+ * Fills *pSet with every CPU that is currently online.
+ * @returns pSet.
+ */
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+    RTCPUID idCpu = RTMpGetMaxCpuId();
+
+    RTCpuSetEmpty(pSet);
+    for (;;)
+    {
+        if (RTMpIsCpuOnline(idCpu))
+            RTCpuSetAdd(pSet, idCpu);
+        if (idCpu-- == 0)
+            break;
+    }
+    return pSet;
+}
+
+
+/** Returns the number of online CPUs (FreeBSD mp_ncpus). */
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+    return mp_ncpus;
+}
+
+
+/**
+ * Wrapper between the native FreeBSD per-cpu callback and PFNRTWORKER
+ * for the RTMpOnAll API.
+ *
+ * @param pvArg Pointer to the RTMPARGS package.
+ */
+static void rtmpOnAllFreeBSDWrapper(void *pvArg)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+ pArgs->pfnWorker(curcpu, pArgs->pvUser1, pArgs->pvUser2);
+}
+
+
+/**
+ * Executes pfnWorker once on every CPU via smp_rendezvous().
+ * Always returns VINF_SUCCESS; the rendezvous blocks until all CPUs are done.
+ */
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    RTMPARGS Args;
+    Args.pfnWorker = pfnWorker;
+    Args.pvUser1 = pvUser1;
+    Args.pvUser2 = pvUser2;
+    Args.idCpu = NIL_RTCPUID;
+    Args.cHits = 0;
+    smp_rendezvous(NULL, rtmpOnAllFreeBSDWrapper, smp_no_rendevous_barrier, &Args);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native FreeBSD per-cpu callback and PFNRTWORKER
+ * for the RTMpOnOthers API.
+ *
+ * @param pvArg Pointer to the RTMPARGS package.
+ */
+static void rtmpOnOthersFreeBSDWrapper(void *pvArg)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+ RTCPUID idCpu = curcpu;
+ if (pArgs->idCpu != idCpu)
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+}
+
+
+/**
+ * Executes pfnWorker on every online CPU except the calling one.
+ *
+ * @returns VINF_SUCCESS (also when this is the only online CPU and nothing
+ *          needs to run).
+ */
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    /* Will panic if no rendezvousing cpus, so check up front. */
+    if (RTMpGetOnlineCount() > 1)
+    {
+#if __FreeBSD_version >= 900000
+        cpuset_t Mask;
+#elif __FreeBSD_version >= 700000
+        cpumask_t Mask;
+#endif
+        RTMPARGS Args;
+
+        Args.pfnWorker = pfnWorker;
+        Args.pvUser1 = pvUser1;
+        Args.pvUser2 = pvUser2;
+        Args.idCpu = RTMpCpuId();
+        Args.cHits = 0;
+#if __FreeBSD_version >= 700000
+# if __FreeBSD_version >= 900000
+        Mask = all_cpus;
+        CPU_CLR(curcpu, &Mask);
+# else
+        /* Fix: build a bit mask of all CPUs minus the current one.  The old
+           code used ~(cpumask_t)curcpu, complementing the CPU *index* rather
+           than its mask bit (cf. the >= 9.0 branch and RTMpOnSpecific). */
+        Mask = all_cpus & ~((cpumask_t)1 << curcpu);
+# endif
+        smp_rendezvous_cpus(Mask, NULL, rtmpOnOthersFreeBSDWrapper, smp_no_rendevous_barrier, &Args);
+#else
+        smp_rendezvous(NULL, rtmpOnOthersFreeBSDWrapper, NULL, &Args);
+#endif
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native FreeBSD per-cpu callback and PFNRTWORKER
+ * for the RTMpOnSpecific API.
+ *
+ * @param pvArg Pointer to the RTMPARGS package.
+ */
+static void rtmpOnSpecificFreeBSDWrapper(void *pvArg)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+ RTCPUID idCpu = curcpu;
+ if (pArgs->idCpu == idCpu)
+ {
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+ }
+}
+
+
+/**
+ * Executes pfnWorker on the CPU identified by idCpu.
+ *
+ * @returns VINF_SUCCESS when the worker ran exactly once on the target CPU,
+ *          VERR_CPU_NOT_FOUND when the CPU is offline or did not participate.
+ */
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+#if __FreeBSD_version >= 900000
+    cpuset_t Mask;
+#elif __FreeBSD_version >= 700000
+    cpumask_t Mask;
+#endif
+    RTMPARGS Args;
+
+    /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */
+    if (!RTMpIsCpuOnline(idCpu))
+        return VERR_CPU_NOT_FOUND;
+
+    Args.pfnWorker = pfnWorker;
+    Args.pvUser1 = pvUser1;
+    Args.pvUser2 = pvUser2;
+    Args.idCpu = idCpu;
+    Args.cHits = 0;
+#if __FreeBSD_version >= 700000
+# if __FreeBSD_version >= 900000
+    CPU_SETOF(idCpu, &Mask);
+# else
+    Mask = (cpumask_t)1 << idCpu;
+# endif
+    smp_rendezvous_cpus(Mask, NULL, rtmpOnSpecificFreeBSDWrapper, smp_no_rendevous_barrier, &Args);
+#else
+    smp_rendezvous(NULL, rtmpOnSpecificFreeBSDWrapper, NULL, &Args);
+#endif
+    /* cHits tells us whether the target CPU really executed the worker. */
+    return Args.cHits == 1
+         ? VINF_SUCCESS
+         : VERR_CPU_NOT_FOUND;
+}
+
+
+#if __FreeBSD_version >= 700000
+/**
+ * Dummy callback for RTMpPokeCpu.
+ * @param pvArg Ignored
+ */
+static void rtmpFreeBSDPokeCallback(void *pvArg)
+{
+    NOREF(pvArg);
+}
+
+
+/**
+ * Pokes the given CPU by rendezvousing with it using an empty callback,
+ * which forces an IPI and thereby an exit from guest/idle context.
+ *
+ * @returns VINF_SUCCESS, or VERR_CPU_NOT_FOUND when the CPU is offline.
+ */
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+#if __FreeBSD_version >= 900000
+    cpuset_t Mask;
+#elif __FreeBSD_version >= 700000
+    cpumask_t Mask;
+#endif
+
+    /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */
+    if (!RTMpIsCpuOnline(idCpu))
+        return VERR_CPU_NOT_FOUND;
+
+# if __FreeBSD_version >= 900000
+    CPU_SETOF(idCpu, &Mask);
+# else
+    Mask = (cpumask_t)1 << idCpu;
+# endif
+    smp_rendezvous_cpus(Mask, NULL, rtmpFreeBSDPokeCallback, smp_no_rendevous_barrier, NULL);
+
+    return VINF_SUCCESS;
+}
+
+#else  /* < 7.0 */
+/** No smp_rendezvous_cpus() before FreeBSD 7.0, so poking is unsupported. */
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+    return VERR_NOT_SUPPORTED;
+}
+#endif /* < 7.0 */
+
+
+/** smp_rendezvous() runs the worker on all CPUs concurrently, so yes. */
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+    return true;
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/process-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/process-r0drv-freebsd.c
new file mode 100644
index 00000000..02ecf25f
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/process-r0drv-freebsd.c
@@ -0,0 +1,51 @@
+/* $Id: process-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Process Management, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/process.h>
+
+
+/** Returns the pid of the current process as an RTPROCESS. */
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+    return curproc->p_pid;
+}
+
+
+/** The ring-0 process handle is the struct proc pointer on FreeBSD. */
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+    return (RTR0PROCESS)curproc;
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/semevent-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/semevent-r0drv-freebsd.c
new file mode 100644
index 00000000..f3635f92
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/semevent-r0drv-freebsd.c
@@ -0,0 +1,256 @@
+/* $Id: semevent-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-freebsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+
+#include "sleepqueue-r0drv-freebsd.h"
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * FreeBSD event semaphore.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The object status - !0 when signaled and 0 when reset. */
+ uint32_t volatile fState;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+/** Convenience wrapper: creates an event semaphore with default flags. */
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+    return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates an event semaphore in the reset state.
+ *
+ * hClass and pszNameFmt are accepted for API compatibility but unused here
+ * (no lock validator in this ring-0 implementation).
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ */
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+    AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+    AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+    Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+    AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+
+    PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic = RTSEMEVENT_MAGIC;
+    pThis->cRefs    = 1;
+    pThis->fState   = 0;
+
+    *phEventSem = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Retains a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventBsdRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs < 100000); NOREF(cRefs);
+}
+
+
+/**
+ * Releases a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventBsdRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ RTMemFree(pThis);
+}
+
+
+/**
+ * Destroys the event semaphore: invalidates the magic so pending waiters see
+ * VERR_SEM_DESTROYED, wakes them all, and drops the creation reference.
+ */
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTINTERNAL pThis = hEventSem;
+    if (pThis == NIL_RTSEMEVENT)
+        return VINF_SUCCESS;
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+    Assert(pThis->cRefs > 0);
+
+    /*
+     * Invalidate it and signal the object just in case.
+     */
+    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
+    ASMAtomicWriteU32(&pThis->fState, 0);
+    rtR0SemBsdBroadcast(pThis);
+    rtR0SemEventBsdRelease(pThis);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Signals the event semaphore, releasing (at most) one waiter.
+ * The retain/release pair keeps the object alive across the wakeup call.
+ */
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+    rtR0SemEventBsdRetain(pThis);
+
+    /*
+     * Signal the event object.
+     */
+    ASMAtomicWriteU32(&pThis->fState, 1);
+    rtR0SemBsdSignal(pThis);
+    rtR0SemEventBsdRelease(pThis);
+    return VINF_SUCCESS;
+}
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ int rc;
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventBsdRetain(pThis);
+
+ /*
+ * Try grab the event without setting up the wait.
+ */
+ if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMBSDSLEEP Wait;
+ rc = rtR0SemBsdWaitInit(&Wait, fFlags, uTimeout, pThis);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ rtR0SemBsdWaitPrepare(&Wait);
+
+ /* Check the exit conditions. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
+ rc = VINF_SUCCESS;
+ else if (rtR0SemBsdWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemBsdWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemBsdWaitDoIt(&Wait);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemBsdWaitDelete(&Wait);
+ }
+ }
+
+ rtR0SemEventBsdRelease(pThis);
+ return rc;
+}
+
+
+/** Waits on the event semaphore; source position only recorded in strict builds. */
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+    return rtR0SemEventWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+    return rtR0SemEventWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitEx);
+
+
+/** Debug variant of RTSemEventWaitEx: always records the caller's position. */
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+                                  RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+    return rtR0SemEventWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitExDebug);
+
+
+/** Wait resolution in nanoseconds: one scheduler tick (1e9 / hz, integer division). */
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+    return 1000000000 / hz;
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/semeventmulti-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/semeventmulti-r0drv-freebsd.c
new file mode 100644
index 00000000..15609046
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/semeventmulti-r0drv-freebsd.c
@@ -0,0 +1,320 @@
+/* $Id: semeventmulti-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-freebsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/lockvalidator.h>
+
+#include "sleepqueue-r0drv-freebsd.h"
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name fStateAndGen values
+ * @{ */
+/** The state bit number. */
+#define RTSEMEVENTMULTIBSD_STATE_BIT 0
+/** The state mask. */
+#define RTSEMEVENTMULTIBSD_STATE_MASK RT_BIT_32(RTSEMEVENTMULTIBSD_STATE_BIT)
+/** The generation mask. */
+#define RTSEMEVENTMULTIBSD_GEN_MASK ~RTSEMEVENTMULTIBSD_STATE_MASK
+/** The generation shift. */
+#define RTSEMEVENTMULTIBSD_GEN_SHIFT 1
+/** The initial variable value. */
+#define RTSEMEVENTMULTIBSD_STATE_GEN_INIT UINT32_C(0xfffffffc)
+/** @} */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * FreeBSD multiple release event semaphore.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+ /** Magic value (RTSEMEVENTMULTI_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The object state bit and generation counter.
+ * The generation counter is incremented every time the object is
+ * signalled. */
+ uint32_t volatile fStateAndGen;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+/** Convenience wrapper: creates a multi-release event with default flags. */
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+    return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates a multi-release event semaphore in the reset state.
+ *
+ * hClass and pszNameFmt are accepted for API compatibility but unused here.
+ * RTMemAlloc (not zeroed) is fine since every member is assigned below.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ */
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+                                    const char *pszNameFmt, ...)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis;
+
+    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+    pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (pThis)
+    {
+        pThis->u32Magic     = RTSEMEVENTMULTI_MAGIC;
+        pThis->fStateAndGen = RTSEMEVENTMULTIBSD_STATE_GEN_INIT;
+        pThis->cRefs        = 1;
+
+        *phEventMultiSem = pThis;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiBsdRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiBsdRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ {
+ Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC);
+ RTMemFree(pThis);
+ }
+}
+
+
+/**
+ * Destroys the semaphore: invalidates the magic so pending waiters see
+ * VERR_SEM_DESTROYED, wakes them all, and drops the creation reference.
+ */
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (pThis == NIL_RTSEMEVENTMULTI)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    Assert(pThis->cRefs > 0);
+
+    /*
+     * Invalidate it and signal the object just in case.
+     * The AND clears the state bit while keeping the generation bits.
+     */
+    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC);
+    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIBSD_GEN_MASK);
+    rtR0SemBsdBroadcast(pThis);
+    rtR0SemEventMultiBsdRelease(pThis);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Signals the semaphore, releasing all current waiters; the state stays
+ * signalled until RTSemEventMultiReset is called.
+ */
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+    uint32_t fNew;
+    uint32_t fOld;
+
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    rtR0SemEventMultiBsdRetain(pThis);
+
+    /*
+     * Signal the event object.  The CAS-loop paranoia here deals with racing
+     * RTSemEventMultiSignal calls (should probably be forbidden, but it's
+     * relatively easy to handle): each signal bumps the generation counter
+     * and sets the state bit atomically.
+     */
+    do
+    {
+        fNew = fOld = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+        fNew += 1 << RTSEMEVENTMULTIBSD_GEN_SHIFT;
+        fNew |= RTSEMEVENTMULTIBSD_STATE_MASK;
+    }
+    while (!ASMAtomicCmpXchgU32(&pThis->fStateAndGen, fNew, fOld));
+
+    rtR0SemBsdBroadcast(pThis);
+    rtR0SemEventMultiBsdRelease(pThis);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets the semaphore to the non-signalled state.
+ * Only the state bit is cleared; the generation counter is preserved so
+ * concurrent waiters can still detect an intervening signal.
+ */
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    rtR0SemEventMultiBsdRetain(pThis);
+
+    /*
+     * Reset it.
+     */
+    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTIBSD_STATE_MASK);
+
+    rtR0SemEventMultiBsdRelease(pThis);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventMultiWaitEx.
+ * @param uTimeout See RTSemEventMultiWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventMultiBsdWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ uint32_t fOrgStateAndGen;
+ int rc;
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiBsdRetain(pThis);
+
+ /*
+ * Is the event already signalled or do we have to wait?
+ */
+ fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ if (fOrgStateAndGen & RTSEMEVENTMULTIBSD_STATE_MASK)
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMBSDSLEEP Wait;
+ rc = rtR0SemBsdWaitInit(&Wait, fFlags, uTimeout, pThis);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ rtR0SemBsdWaitPrepare(&Wait);
+
+ /* Check the exit conditions. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
+ rc = VINF_SUCCESS;
+ else if (rtR0SemBsdWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemBsdWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemBsdWaitDoIt(&Wait);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemBsdWaitDelete(&Wait);
+ }
+ }
+
+ rtR0SemEventMultiBsdRelease(pThis);
+ return rc;
+}
+
+
+/** Waits on the semaphore; source position only recorded in strict builds. */
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+    return rtR0SemEventMultiBsdWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#else
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+    return rtR0SemEventMultiBsdWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitEx);
+
+
+/** Debug variant of RTSemEventMultiWaitEx: always records the caller's position. */
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+                                       RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+    return rtR0SemEventMultiBsdWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitExDebug);
+
+
+/** Returns the wait resolution in nanoseconds, as reported by the sleepqueue helper. */
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+    return rtR0SemBsdWaitGetResolution();
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiGetResolution);
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/semfastmutex-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/semfastmutex-r0drv-freebsd.c
new file mode 100644
index 00000000..28517795
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/semfastmutex-r0drv-freebsd.c
@@ -0,0 +1,115 @@
+/* $Id: semfastmutex-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/errcore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the FreeBSD (sleep) mutex.
+ *
+ * Backed by an sx (shared/exclusive sleep) lock that is only ever taken
+ * exclusively; acquiring it may therefore sleep.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+    /** Magic value (RTSEMFASTMUTEX_MAGIC); set to RTSEMFASTMUTEX_MAGIC_DEAD on destruction. */
+    uint32_t            u32Magic;
+    /** The FreeBSD shared/exclusive lock mutex. */
+    struct sx           SxLock;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+/**
+ * Creates a fast mutex semaphore.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INVALID_POINTER.
+ * @param   phFastMtx   Where to store the handle on success.
+ */
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis;
+
+    /* Guards against handle/pointer confusion: the struct must be bigger than a pointer. */
+    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+
+    pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+    /* SX_DUPOK: several IPRT fast mutexes may be held at once. */
+    sx_init_flags(&pThis->SxLock, "IPRT Fast Mutex Semaphore", SX_DUPOK);
+
+    *phFastMtx = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys a fast mutex semaphore.
+ *
+ * @returns VINF_SUCCESS (also for NIL handles) or VERR_INVALID_HANDLE.
+ * @param   hFastMtx    The handle to destroy; NIL is ignored.
+ */
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    if (pThis == NIL_RTSEMFASTMUTEX)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    /* Invalidate the magic atomically so a concurrent/double destroy is caught
+       instead of freeing twice (consistent with RTSemMutexDestroy). */
+    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD, RTSEMFASTMUTEX_MAGIC), VERR_INVALID_HANDLE);
+
+    sx_destroy(&pThis->SxLock);
+    RTMemFree(pThis);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Acquires the fast mutex.
+ *
+ * Takes the wrapped sx lock exclusively; this is a sleep lock, so the caller
+ * must be in a context where blocking is permitted.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hFastMtx    The fast mutex handle.
+ */
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    sx_xlock(&pThis->SxLock);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Releases the fast mutex previously acquired with RTSemFastMutexRequest.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hFastMtx    The fast mutex handle.
+ */
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    sx_xunlock(&pThis->SxLock);
+    return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/semmutex-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/semmutex-r0drv-freebsd.c
new file mode 100644
index 00000000..fbb44d4a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/semmutex-r0drv-freebsd.c
@@ -0,0 +1,219 @@
+/* $Id: semmutex-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (C) 2010-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-freebsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the FreeBSD (sleep) mutex.
+ *
+ * Backed by a recursion-capable sx lock (SX_RECURSE is passed at creation),
+ * taken exclusively on every request.
+ */
+typedef struct RTSEMMUTEXINTERNAL
+{
+    /** Magic value (RTSEMMUTEX_MAGIC); RTSEMMUTEX_MAGIC_DEAD after destruction. */
+    uint32_t            u32Magic;
+    /** The FreeBSD shared/exclusive lock mutex. */
+    struct sx           SxLock;
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+/**
+ * Creates a mutex semaphore.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INVALID_POINTER.
+ * @param   phMutexSem  Where to store the handle on success.
+ */
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMutexSem)
+{
+    PRTSEMMUTEXINTERNAL pThis;
+
+    /* Guards against handle/pointer confusion: the struct must be bigger than a pointer. */
+    AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
+    AssertPtrReturn(phMutexSem, VERR_INVALID_POINTER);
+
+    pThis = (PRTSEMMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic = RTSEMMUTEX_MAGIC;
+    /* SX_RECURSE: IPRT mutexes may be acquired recursively by the owner. */
+    sx_init_flags(&pThis->SxLock, "IPRT Mutex Semaphore", SX_RECURSE);
+
+    *phMutexSem = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys a mutex semaphore.
+ *
+ * @returns VINF_SUCCESS (also for NIL handles) or VERR_INVALID_HANDLE.
+ * @param   hMutexSem   The handle to destroy; NIL is quietly ignored.
+ */
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    if (pThis == NIL_RTSEMMUTEX)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    /* Atomically kill the magic so a racing destroy loses and bails out
+       instead of double-freeing. */
+    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
+
+    sx_destroy(&pThis->SxLock);
+    RTMemFree(pThis);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Acquires the mutex, waiting at most cMillies for it.
+ *
+ * @returns VINF_SUCCESS on acquisition, VERR_TIMEOUT when the mutex could
+ *          not be acquired within the interval, VERR_INVALID_HANDLE on a
+ *          bad handle.
+ * @param   hMutexSem   The mutex handle.
+ * @param   cMillies    Timeout in milliseconds; 0 = try once,
+ *                      RT_INDEFINITE_WAIT = block forever.
+ */
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    int rc;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    if (cMillies == RT_INDEFINITE_WAIT)
+    {
+        sx_xlock(&pThis->SxLock);
+        rc = VINF_SUCCESS;
+    }
+    else if (!cMillies)
+    {
+        /* Polling request: try once, never block. */
+        if (sx_try_xlock(&pThis->SxLock))
+            rc = VINF_SUCCESS;
+        else
+            rc = VERR_TIMEOUT;
+    }
+    /*
+     * GROSS HACK: poll implementation of timeout.
+     */
+    /** @todo Implement timeouts in RTSemMutexRequest. */
+    else if (sx_try_xlock(&pThis->SxLock))
+        rc = VINF_SUCCESS;
+    else
+    {
+        uint64_t StartTS = RTTimeSystemMilliTS();
+        rc = VERR_TIMEOUT;
+        do
+        {
+            RTThreadSleep(1);
+            if (sx_try_xlock(&pThis->SxLock))
+            {
+                rc = VINF_SUCCESS;
+                break;
+            }
+        } while (RTTimeSystemMilliTS() - StartTS < cMillies);
+    }
+
+    /* Bugfix: was 'return VINF_SUCCESS;', which reported success on timeout
+       even though the mutex was NOT acquired. */
+    return rc;
+}
+
+
+/**
+ * Debug variant of RTSemMutexRequest.
+ *
+ * The source position arguments (uId + RT_SRC_POS) are currently unused on
+ * FreeBSD; the call simply forwards to RTSemMutexRequest.
+ */
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+/**
+ * Acquires the mutex, waiting at most cMillies, without resuming after
+ * signal interruption.
+ *
+ * @returns VINF_SUCCESS on acquisition, VERR_TIMEOUT on timeout,
+ *          VERR_INTERRUPTED if the indefinite wait was broken by a signal,
+ *          VERR_INVALID_HANDLE on a bad handle.
+ * @param   hMutexSem   The mutex handle.
+ * @param   cMillies    Timeout in milliseconds; 0 = try once,
+ *                      RT_INDEFINITE_WAIT = block until acquired or signalled.
+ */
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    int rc;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    if (cMillies == RT_INDEFINITE_WAIT)
+    {
+        /* sx_xlock_sig returns 0 on acquisition, an errno value on signal. */
+        if (!sx_xlock_sig(&pThis->SxLock))
+            rc = VINF_SUCCESS;
+        else
+            rc = VERR_INTERRUPTED;
+    }
+    else if (!cMillies)
+    {
+        /* Polling request: try once, never block. */
+        if (sx_try_xlock(&pThis->SxLock))
+            rc = VINF_SUCCESS;
+        else
+            rc = VERR_TIMEOUT;
+    }
+    /*
+     * GROSS HACK: poll implementation of timeout.
+     */
+    /** @todo Implement timeouts and interrupt checks in
+     *        RTSemMutexRequestNoResume. */
+    else if (sx_try_xlock(&pThis->SxLock))
+        rc = VINF_SUCCESS;
+    else
+    {
+        uint64_t StartTS = RTTimeSystemMilliTS();
+        rc = VERR_TIMEOUT;
+        do
+        {
+            RTThreadSleep(1);
+            if (sx_try_xlock(&pThis->SxLock))
+            {
+                rc = VINF_SUCCESS;
+                break;
+            }
+        } while (RTTimeSystemMilliTS() - StartTS < cMillies);
+    }
+
+    /* Bugfix: was 'return VINF_SUCCESS;', which swallowed VERR_TIMEOUT and
+       VERR_INTERRUPTED and claimed ownership the caller does not have. */
+    return rc;
+}
+
+
+/**
+ * Debug variant of RTSemMutexRequestNoResume.
+ *
+ * The source position arguments (uId + RT_SRC_POS) are currently unused on
+ * FreeBSD; the call simply forwards to RTSemMutexRequestNoResume.
+ */
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+/**
+ * Releases a mutex acquired by one of the request functions.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hMutexSem   The mutex handle.
+ */
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    sx_xunlock(&pThis->SxLock);
+    return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Checks whether the mutex is exclusively locked.
+ *
+ * @returns true if the sx lock is held exclusively (by any thread),
+ *          false otherwise or on a bad handle.
+ * @param   hMutexSem   The mutex handle.
+ */
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    AssertPtrReturn(pThis, false);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), false);
+
+    return sx_xlocked(&pThis->SxLock);
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/sleepqueue-r0drv-freebsd.h b/src/VBox/Runtime/r0drv/freebsd/sleepqueue-r0drv-freebsd.h
new file mode 100644
index 00000000..dc1c3307
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/sleepqueue-r0drv-freebsd.h
@@ -0,0 +1,334 @@
+/* $Id: sleepqueue-r0drv-freebsd.h $ */
+/** @file
+ * IPRT - FreeBSD Ring-0 Driver Helpers for Abstracting Sleep Queues,
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_freebsd_sleepqueue_r0drv_freebsd_h
+#define IPRT_INCLUDED_SRC_r0drv_freebsd_sleepqueue_r0drv_freebsd_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "the-freebsd-kernel.h"
+
+#include <iprt/asm-math.h>
+#include <iprt/err.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+
+/**
+ * Kernel mode FreeBSD wait state structure.
+ *
+ * Carries the per-wait timeout and status flags shared between the
+ * rtR0SemBsdWait* helpers below.
+ */
+typedef struct RTR0SEMBSDSLEEP
+{
+    /** The absolute timeout given as nano seconds since the start of the
+     *  monotonic clock. */
+    uint64_t uNsAbsTimeout;
+    /** The timeout in ticks. Updated after waiting. */
+    int iTimeout;
+    /** Set if it's an indefinite wait. */
+    bool fIndefinite;
+    /** Set if we've already timed out.
+     * Set by rtR0SemBsdWaitDoIt and read by rtR0SemBsdWaitHasTimedOut. */
+    bool fTimedOut;
+    /** Flag whether the wait was interrupted (set by rtR0SemBsdWaitDoIt). */
+    bool fInterrupted;
+    /** Flag whether the wait is interruptible or not. */
+    bool fInterruptible;
+    /** Opaque wait channel id passed to the sleepq_* KPI. */
+    void *pvWaitChan;
+} RTR0SEMBSDSLEEP;
+/** Pointer to a FreeBSD wait state. */
+typedef RTR0SEMBSDSLEEP *PRTR0SEMBSDSLEEP;
+
+
+/**
+ * Updates the timeout of the FreeBSD wait.
+ *
+ * Converts a relative nanosecond timeout into scheduler ticks (hz based).
+ *
+ * @returns RTSEMWAIT_FLAGS_INDEFINITE if the timeout value is too big.
+ *          0 otherwise
+ * @param   pWait    The wait structure.
+ * @param   uTimeout The relative timeout in nanoseconds.
+ */
+DECLINLINE(uint32_t) rtR0SemBsdWaitUpdateTimeout(PRTR0SEMBSDSLEEP pWait, uint64_t uTimeout)
+{
+#if 0
+    struct timeval tv;
+
+    tv.tv_sec = uTimeout / UINT64_C(1000000000);
+    tv.tv_usec = (uTimeout % UINT64_C(1000000000)) / UINT64_C(1000);
+
+    pWait->iTimeout = tvtohz(&tv);
+#else
+    /* cTicks = uTimeout * hz / 1e9, computed without 64-bit overflow. */
+    uint64_t cTicks = ASMMultU64ByU32DivByU32(uTimeout, hz, UINT32_C(1000000000));
+    if (cTicks >= INT_MAX)
+        return RTSEMWAIT_FLAGS_INDEFINITE;
+    else
+        /* NOTE(review): a sub-tick timeout yields iTimeout == 0 here;
+           presumably sleepq_set_timeout treats that sanely — TODO confirm. */
+        pWait->iTimeout = (int)cTicks;
+#endif
+
+    return 0;
+}
+
+/**
+ * Initializes a wait.
+ *
+ * Normalizes the RTSEMWAIT_FLAGS_XXX flags and timeout into the
+ * RTR0SEMBSDSLEEP state: an absolute nanosecond deadline plus a tick count,
+ * or the indefinite-wait marker values.
+ *
+ * The caller MUST check the wait condition BEFORE calling this function or the
+ * timeout logic will be flawed.
+ *
+ * @returns VINF_SUCCESS or VERR_TIMEOUT (zero/expired timeout).
+ * @param   pWait      The wait structure.
+ * @param   fFlags     The wait flags.
+ * @param   uTimeout   The timeout.
+ * @param   pvWaitChan The opaque wait channel.
+ */
+DECLINLINE(int) rtR0SemBsdWaitInit(PRTR0SEMBSDSLEEP pWait, uint32_t fFlags, uint64_t uTimeout,
+                                   void *pvWaitChan)
+{
+    pWait->iTimeout = 0;
+    pWait->uNsAbsTimeout = 0; /* shut up gcc */
+
+    /*
+     * Process the flags and timeout.
+     */
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+/** @todo optimize: millisecs -> nanosecs -> millisec -> jiffies */
+        /* Normalize to nanoseconds; saturate to UINT64_MAX on overflow. */
+        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
+                     ? uTimeout * UINT32_C(1000000)
+                     : UINT64_MAX;
+        if (uTimeout == UINT64_MAX)
+            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+        else
+        {
+            uint64_t u64Now;
+            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+            {
+                if (uTimeout == 0)
+                    return VERR_TIMEOUT;
+
+                u64Now = RTTimeSystemNanoTS();
+                if (u64Now + uTimeout < u64Now) /* overflow */
+                    fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+                else
+                    pWait->uNsAbsTimeout = u64Now + uTimeout;
+            }
+            else
+            {
+                /* Absolute deadline: fail fast if it already passed. */
+                u64Now = RTTimeSystemNanoTS();
+                if (u64Now >= uTimeout)
+                    return VERR_TIMEOUT;
+
+                pWait->uNsAbsTimeout = uTimeout;
+                uTimeout -= u64Now; /* Get a relative value. */
+            }
+        }
+    }
+
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+        pWait->fIndefinite = false;
+        /* May still flip to indefinite if the tick count overflows INT_MAX. */
+        fFlags |= rtR0SemBsdWaitUpdateTimeout(pWait, uTimeout);
+    }
+
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+    {
+        pWait->fIndefinite = true;
+        pWait->iTimeout = INT_MAX;
+        pWait->uNsAbsTimeout = UINT64_MAX;
+    }
+
+    pWait->fTimedOut = false;
+
+    /*
+     * Initialize the wait queue related bits.
+     */
+    pWait->fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE
+                          ? true : false;
+    pWait->pvWaitChan = pvWaitChan;
+    pWait->fInterrupted = false;
+
+    return VINF_SUCCESS;
+}
+
+/**
+ * Prepares the next wait.
+ *
+ * Takes the sleep queue chain lock for the wait channel. This must be called
+ * before rtR0SemBsdWaitDoIt, and the caller should check the exit conditions
+ * inbetween the two calls (the lock closes the lost-wakeup window).
+ *
+ * @param   pWait   The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitPrepare(PRTR0SEMBSDSLEEP pWait)
+{
+    /* Lock the queues. */
+    sleepq_lock(pWait->pvWaitChan);
+}
+
+/**
+ * Do the actual wait.
+ *
+ * Adds the thread to the sleep queue (locked by rtR0SemBsdWaitPrepare) and
+ * blocks, honouring the timeout/interruptibility settings. On return the
+ * sleep queue lock has been dropped by the sleepq_*wait* call; the outcome
+ * is recorded in pWait->fTimedOut / pWait->fInterrupted.
+ *
+ * @param   pWait   The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitDoIt(PRTR0SEMBSDSLEEP pWait)
+{
+    int rcBsd;
+    int fSleepqFlags = SLEEPQ_CONDVAR;
+
+    if (pWait->fInterruptible)
+        fSleepqFlags |= SLEEPQ_INTERRUPTIBLE;
+
+    sleepq_add(pWait->pvWaitChan, NULL, "VBoxIS", fSleepqFlags, 0);
+
+    if (!pWait->fIndefinite)
+    {
+        sleepq_set_timeout(pWait->pvWaitChan, pWait->iTimeout);
+
+        if (pWait->fInterruptible)
+            rcBsd = SLEEPQ_TIMEDWAIT_SIG(pWait->pvWaitChan);
+        else
+            rcBsd = SLEEPQ_TIMEDWAIT(pWait->pvWaitChan);
+    }
+    else
+    {
+        if (pWait->fInterruptible)
+            rcBsd = SLEEPQ_WAIT_SIG(pWait->pvWaitChan);
+        else
+        {
+            rcBsd = 0;
+            SLEEPQ_WAIT(pWait->pvWaitChan);
+        }
+    }
+
+    /* Translate the sleepq status into the wait-state flags. */
+    switch (rcBsd)
+    {
+        case 0:
+            break;
+        case ERESTART:
+        {
+            /* Restartable signal: recompute the remaining tick budget from
+               the absolute deadline so the caller can simply wait again. */
+            if (!pWait->fIndefinite)
+            {
+                /* Recalc timeout. */
+                uint64_t u64Now = RTTimeSystemNanoTS();
+                if (u64Now >= pWait->uNsAbsTimeout)
+                    pWait->fTimedOut = true;
+                else
+                {
+                    u64Now = pWait->uNsAbsTimeout - u64Now;
+                    rtR0SemBsdWaitUpdateTimeout(pWait, u64Now);
+                }
+            }
+            break;
+        }
+        case EWOULDBLOCK:
+            pWait->fTimedOut = true;
+            break;
+        case EINTR:
+            Assert(pWait->fInterruptible);
+            pWait->fInterrupted = true;
+            break;
+        default:
+            AssertMsgFailed(("sleepq_* -> %d\n", rcBsd));
+            break;
+    }
+}
+
+
+/**
+ * Checks if a FreeBSD wait was interrupted.
+ *
+ * @returns true / false
+ * @param   pWait   The wait structure.
+ * @remarks The flag is set by rtR0SemBsdWaitDoIt (EINTR); before the first
+ *          wait it is always false, so calling this first is harmless.
+ */
+DECLINLINE(bool) rtR0SemBsdWaitWasInterrupted(PRTR0SEMBSDSLEEP pWait)
+{
+    return pWait->fInterrupted;
+}
+
+
+/**
+ * Checks if a FreeBSD wait has timed out.
+ *
+ * @returns true / false
+ * @param   pWait   The wait structure (fTimedOut is set by rtR0SemBsdWaitDoIt).
+ */
+DECLINLINE(bool) rtR0SemBsdWaitHasTimedOut(PRTR0SEMBSDSLEEP pWait)
+{
+    return pWait->fTimedOut;
+}
+
+
+/**
+ * Deletes a FreeBSD wait.
+ *
+ * Drops the sleep queue chain lock taken by rtR0SemBsdWaitPrepare.
+ *
+ * @param   pWait   The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitDelete(PRTR0SEMBSDSLEEP pWait)
+{
+    sleepq_release(pWait->pvWaitChan);
+}
+
+
+/**
+ * Signals the wait channel, waking a single waiter.
+ *
+ * @param   pvWaitChan  The opaque wait channel handle.
+ */
+DECLINLINE(void) rtR0SemBsdSignal(void *pvWaitChan)
+{
+    int fWakeupSwapProc;
+
+    sleepq_lock(pvWaitChan);
+    fWakeupSwapProc = sleepq_signal(pvWaitChan, SLEEPQ_CONDVAR, 0, 0);
+    sleepq_release(pvWaitChan);
+
+    /* sleepq_signal indicated a swapped-out thread; poke the swapper. */
+    if (fWakeupSwapProc)
+        kick_proc0();
+}
+
+/**
+ * Wakes up all waiters on the wait channel.
+ *
+ * @param   pvWaitChan  The opaque wait channel handle.
+ */
+DECLINLINE(void) rtR0SemBsdBroadcast(void *pvWaitChan)
+{
+    sleepq_lock(pvWaitChan);
+    sleepq_broadcast(pvWaitChan, SLEEPQ_CONDVAR, 0, 0);
+#if __FreeBSD_version >= 800000 /* Broadcast releases the sleep queue lock on FreeBSD 7.x, so only release explicitly on 8.0+. */
+    sleepq_release(pvWaitChan);
+#endif
+}
+
+/**
+ * Gets the max resolution of the timeout machinery.
+ *
+ * @returns Resolution specified in nanoseconds (one scheduler tick, 1e9/hz).
+ */
+DECLINLINE(uint32_t) rtR0SemBsdWaitGetResolution(void)
+{
+    return 1000000000 / hz; /* ns */
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_freebsd_sleepqueue_r0drv_freebsd_h */
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/spinlock-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/spinlock-r0drv-freebsd.c
new file mode 100644
index 00000000..d493290a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/spinlock-r0drv-freebsd.c
@@ -0,0 +1,210 @@
+/* $Id: spinlock-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/spinlock.h>
+#include <iprt/errcore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/thread.h>
+#include <iprt/mp.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the struct mtx type.
+ *
+ * Actually implemented as a hand-rolled compare-and-swap spinlock guarded by
+ * a FreeBSD critical section (see RTSpinlockAcquire/Release).
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+    /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+    uint32_t volatile   u32Magic;
+    /** The spinlock: 0 = free, 1 = held. */
+    uint32_t volatile   fLocked;
+    /** Saved interrupt flag (only used with RTSPINLOCK_FLAGS_INTERRUPT_SAFE). */
+    uint32_t volatile   fIntSaved;
+    /** The spinlock creation flags. */
+    uint32_t            fFlags;
+#ifdef RT_MORE_STRICT
+    /** The idAssertCpu variable before acquiring the lock for asserting after
+     *  releasing the spinlock. */
+    RTCPUID volatile    idAssertCpu;
+    /** The CPU that owns the lock. */
+    RTCPUID volatile    idCpuOwner;
+#endif
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+/**
+ * Creates a spinlock.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INVALID_PARAMETER.
+ * @param   pSpinlock   Where to store the handle on success.
+ * @param   fFlags      RTSPINLOCK_FLAGS_INTERRUPT_SAFE or _UNSAFE.
+ * @param   pszName     Lock name (unused by this implementation).
+ */
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+    PRTSPINLOCKINTERNAL pThis;
+
+    RT_ASSERT_PREEMPTIBLE();
+    AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+
+    /*
+     * Allocate and initialize.
+     */
+    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+    pThis = (PRTSPINLOCKINTERNAL)RTMemAllocZ(sizeof(*pThis));
+    if (pThis)
+    {
+        pThis->u32Magic  = RTSPINLOCK_MAGIC;
+        pThis->fLocked   = 0;
+        pThis->fIntSaved = 0;
+        pThis->fFlags    = fFlags;
+
+        *pSpinlock = pThis;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Destroys a spinlock.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
+ * @param   Spinlock    The spinlock handle.
+ */
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+
+    /*
+     * Validate input.
+     */
+    RT_ASSERT_INTS_ON();
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
+                    ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
+                    VERR_INVALID_PARAMETER);
+
+    /* Invalidate the magic before freeing so stale users trip the asserts,
+       then release the memory. */
+    ASMAtomicIncU32(&pThis->u32Magic);
+    RTMemFree(pThis);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Acquires the spinlock.
+ *
+ * Spins with compare-and-swap inside a FreeBSD critical section (preemption
+ * disabled). For interrupt-safe locks, local interrupts are disabled as well
+ * and the saved flags stashed in the lock for RTSpinlockRelease to restore.
+ * After 50 failed CAS attempts the critical section (and interrupt mask) is
+ * dropped, DELAY(1) backs off, and the whole sequence restarts.
+ *
+ * @param   Spinlock    The spinlock handle.
+ */
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    RT_ASSERT_PREEMPT_CPUID_VAR();
+    AssertPtr(pThis);
+    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+    {
+        for (;;)
+        {
+            /* Disable interrupts first, then preemption. */
+            uint32_t fIntSaved = ASMIntDisableFlags();
+            critical_enter();
+
+            int c = 50;
+            for (;;)
+            {
+                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
+                {
+                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
+                    /* Owner-only store: the release path restores these flags. */
+                    pThis->fIntSaved = fIntSaved;
+                    return;
+                }
+                if (--c <= 0)
+                    break;
+                cpu_spinwait();
+            }
+
+            /* Enable interrupts while we sleep. */
+            ASMSetFlags(fIntSaved);
+            critical_exit();
+            DELAY(1);
+        }
+    }
+    else
+    {
+        for (;;)
+        {
+            critical_enter();
+
+            int c = 50;
+            for (;;)
+            {
+                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
+                {
+                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
+                    return;
+                }
+                if (--c <= 0)
+                    break;
+                cpu_spinwait();
+            }
+
+            critical_exit();
+            DELAY(1);
+        }
+    }
+}
+
+
+/**
+ * Releases the spinlock and leaves the critical section entered by
+ * RTSpinlockAcquire, restoring the saved interrupt flags for
+ * interrupt-safe locks.
+ *
+ * @param   Spinlock    The spinlock handle (must be held by the caller).
+ */
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();
+
+    AssertPtr(pThis);
+    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);
+
+    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+    {
+        /* Read the saved flags before dropping the lock; only the owner may
+           touch fIntSaved while fLocked is set. */
+        uint32_t fIntSaved = pThis->fIntSaved;
+        pThis->fIntSaved = 0;
+        if (ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
+            ASMSetFlags(fIntSaved);
+        else
+            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
+    }
+    else
+    {
+        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
+            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
+    }
+
+    /* Matches the critical_enter() of the successful acquire. */
+    critical_exit();
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/the-freebsd-kernel.h b/src/VBox/Runtime/r0drv/freebsd/the-freebsd-kernel.h
new file mode 100644
index 00000000..eb4019a2
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/the-freebsd-kernel.h
@@ -0,0 +1,122 @@
+/* $Id: the-freebsd-kernel.h $ */
+/** @file
+ * IPRT - Ring-0 Driver, The FreeBSD Kernel Headers.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_freebsd_the_freebsd_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_freebsd_the_freebsd_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/types.h>
+
+/* Deal with conflicts first. */
+#include <sys/param.h>
+#undef PVM
+#include <sys/bus.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/kernel.h>
+#include <sys/uio.h>
+#include <sys/libkern.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/limits.h>
+#include <sys/unistd.h>
+#include <sys/kthread.h>
+#include <sys/lock.h>
+#if __FreeBSD_version >= 1000030
+#include <sys/rwlock.h>
+#endif
+#include <sys/mutex.h>
+#include <sys/sched.h>
+#include <sys/callout.h>
+#include <sys/cpu.h>
+#include <sys/smp.h>
+#include <sys/sleepqueue.h>
+#include <sys/sx.h>
+#include <vm/vm.h>
+#include <vm/pmap.h> /* for vtophys */
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_param.h> /* KERN_SUCCESS ++ */
+#include <vm/vm_page.h>
+#include <vm/vm_phys.h> /* vm_phys_alloc_* */
+#include <vm/vm_extern.h> /* kmem_alloc_attr */
+#include <vm/vm_pageout.h> /* vm_contig_grow_cache */
+#include <sys/vmmeter.h> /* cnt */
+#include <sys/resourcevar.h>
+#include <machine/cpu.h>
+
+/**
+ * Wrappers around the sleepq_ KPI.
+ */
+#if __FreeBSD_version >= 800026
+# define SLEEPQ_TIMEDWAIT(EventInt) sleepq_timedwait(EventInt, 0)
+# define SLEEPQ_TIMEDWAIT_SIG(EventInt) sleepq_timedwait_sig(EventInt, 0)
+# define SLEEPQ_WAIT(EventInt) sleepq_wait(EventInt, 0)
+# define SLEEPQ_WAIT_SIG(EventInt) sleepq_wait_sig(EventInt, 0)
+#else
+# define SLEEPQ_TIMEDWAIT(EventInt) sleepq_timedwait(EventInt)
+# define SLEEPQ_TIMEDWAIT_SIG(EventInt) sleepq_timedwait_sig(EventInt)
+# define SLEEPQ_WAIT(EventInt) sleepq_wait(EventInt)
+# define SLEEPQ_WAIT_SIG(EventInt) sleepq_wait_sig(EventInt)
+#endif
+
+/**
+ * Our pmap_enter version
+ */
+#if __FreeBSD_version >= 701105
+# define MY_PMAP_ENTER(pPhysMap, AddrR3, pPage, fProt, fWired) \
+ pmap_enter(pPhysMap, AddrR3, VM_PROT_NONE, pPage, fProt, fWired)
+#else
+# define MY_PMAP_ENTER(pPhysMap, AddrR3, pPage, fProt, fWired) \
+ pmap_enter(pPhysMap, AddrR3, pPage, fProt, fWired)
+#endif
+
+/**
+ * Check whether we can use kmem_alloc_attr for low allocs.
+ */
+#if (__FreeBSD_version >= 900011) \
+ || (__FreeBSD_version < 900000 && __FreeBSD_version >= 800505) \
+ || (__FreeBSD_version < 800000 && __FreeBSD_version >= 703101)
+# define USE_KMEM_ALLOC_ATTR
+#endif
+
+/**
+ * Check whether we can use kmem_alloc_prot.
+ */
+#if 0 /** @todo Not available yet. */
+# define USE_KMEM_ALLOC_PROT
+#endif
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_freebsd_the_freebsd_kernel_h */
diff --git a/src/VBox/Runtime/r0drv/freebsd/thread-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/thread-r0drv-freebsd.c
new file mode 100644
index 00000000..3fac9d4b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/thread-r0drv-freebsd.c
@@ -0,0 +1,186 @@
+/* $Id: thread-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Threads (Part 1), Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (C) 2007-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mp.h>
+#include "internal/thread.h"
+
+
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+    /* The FreeBSD struct thread pointer doubles as the IPRT native handle. */
+    return (RTNATIVETHREAD)curthread;
+}
+
+
+/**
+ * Common worker for RTThreadSleep and RTThreadSleepNoLog.
+ *
+ * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED or
+ *          VERR_NO_TRANSLATION (unexpected tsleep status).
+ * @param   cMillies    Milliseconds to sleep.  0 just yields the CPU and
+ *                      RT_INDEFINITE_WAIT sleeps without a timeout.
+ */
+static int rtR0ThreadFbsdSleepCommon(RTMSINTERVAL cMillies)
+{
+    int rc;
+    int cTicks;
+
+    /*
+     * 0 ms sleep -> yield.
+     */
+    if (!cMillies)
+    {
+        RTThreadYield();
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Translate milliseconds into ticks and go to sleep.
+     */
+    if (cMillies != RT_INDEFINITE_WAIT)
+    {
+        /* Fast paths for the common hz values; the generic path clamps the
+           tick count to INT_MAX if the 64-bit result overflows an int. */
+        if (hz == 1000)
+            cTicks = cMillies;
+        else if (hz == 100)
+            cTicks = cMillies / 10;
+        else
+        {
+            int64_t cTicks64 = ((uint64_t)cMillies * hz) / 1000;
+            cTicks = (int)cTicks64;
+            if (cTicks != cTicks64)
+                cTicks = INT_MAX;
+        }
+    }
+    else
+        cTicks = 0; /* requires giant lock! */
+
+    /* Any unique address serves as the wait channel; nothing in this file
+       ever wakeup()s it, so we only return via timeout or signal (PCATCH). */
+    rc = tsleep((void *)RTThreadSleep,
+                PZERO | PCATCH,
+                "iprtsl", /* max 6 chars */
+                cTicks);
+    switch (rc)
+    {
+        case 0:
+            return VINF_SUCCESS;
+        case EWOULDBLOCK:
+            return VERR_TIMEOUT;
+        case EINTR:
+        case ERESTART:
+            return VERR_INTERRUPTED;
+        default:
+            AssertMsgFailed(("%d\n", rc));
+            return VERR_NO_TRANSLATION;
+    }
+}
+
+
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+    /* Defer to the common FreeBSD sleep worker. */
+    return rtR0ThreadFbsdSleepCommon(cMillies);
+}
+
+
+RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
+{
+    /* Identical to RTThreadSleep here; this backend does no logging. */
+    return rtR0ThreadFbsdSleepCommon(cMillies);
+}
+
+
+RTDECL(bool) RTThreadYield(void)
+{
+    /* kern_yield() replaced uio_yield() in FreeBSD 9.0 (version 900032). */
+#if __FreeBSD_version >= 900032
+    kern_yield(curthread->td_user_pri);
+#else
+    uio_yield();
+#endif
+    return false; /** @todo figure this one ... */
+}
+
+
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+    /* Only the current thread is supported. */
+    Assert(hThread == NIL_RTTHREAD);
+
+    /* Preemption is enabled when we are outside every critical section
+       (td_critnest == 0) and interrupts are enabled on this CPU. */
+    return curthread->td_critnest == 0
+        && ASMIntAreEnabled(); /** @todo is there a native freebsd function/macro for this? */
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+    /* Only the current thread is supported. */
+    Assert(hThread == NIL_RTTHREAD);
+
+    /* The scheduler sets td_owepreempt when a preemption was deferred
+       because the thread was inside a critical section. */
+    bool const fPending = curthread->td_owepreempt == 1;
+    return fPending;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+    /* The answer from RTThreadPreemptIsPending can be relied upon here. */
+    return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+    /* The FreeBSD kernel is preemptive. */
+    return true;
+}
+
+
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    Assert(pState->u32Reserved == 0);
+    pState->u32Reserved = 42; /* cookie checked by RTThreadPreemptRestore */
+
+    /* Entering a critical section disables preemption on FreeBSD. */
+    critical_enter();
+    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+}
+
+
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    Assert(pState->u32Reserved == 42); /* must pair with RTThreadPreemptDisable */
+    pState->u32Reserved = 0;
+
+    /* Leave the critical section entered by RTThreadPreemptDisable. */
+    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+    critical_exit();
+}
+
+
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+    /** @todo FreeBSD: Implement RTThreadIsInInterrupt. Required for guest
+     *        additions! */
+    /* Crude approximation: disabled interrupts are taken to mean interrupt
+       context.  This is not a reliable indicator - see the todo above. */
+    return !ASMIntAreEnabled();
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/thread2-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/thread2-r0drv-freebsd.c
new file mode 100644
index 00000000..0092232e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/thread2-r0drv-freebsd.c
@@ -0,0 +1,155 @@
+/* $Id: thread2-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+
+#include "internal/thread.h"
+
+
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+    /* No native one-time initialization is required on FreeBSD. */
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+    /* Look up the IPRT thread structure for the calling native thread. */
+    return rtThreadGetByNative(RTThreadNativeSelf());
+}
+
+
+/**
+ * Applies the FreeBSD scheduler priority corresponding to an IPRT thread type.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER for unknown types.
+ * @param   pThread     The IPRT thread structure (not dereferenced here).
+ * @param   enmType     The IPRT thread type to map to a priority.
+ *
+ * NOTE(review): this adjusts @c curthread rather than the thread behind
+ * @a pThread - presumably it is only ever invoked from the affected thread
+ * itself; confirm against the common thread code callers.
+ */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+    int iPriority;
+
+    /* Map the IPRT thread type onto a FreeBSD priority value. */
+    switch (enmType)
+    {
+        case RTTHREADTYPE_INFREQUENT_POLLER:    iPriority = PZERO + 8; break;
+        case RTTHREADTYPE_EMULATION:            iPriority = PZERO + 4; break;
+        case RTTHREADTYPE_DEFAULT:              iPriority = PZERO; break;
+        case RTTHREADTYPE_MSG_PUMP:             iPriority = PZERO - 4; break;
+        case RTTHREADTYPE_IO:                   iPriority = PRIBIO; break;
+        case RTTHREADTYPE_TIMER:                iPriority = PRI_MIN_KERN; break;
+        default:
+            AssertMsgFailed(("enmType=%d\n", enmType));
+            return VERR_INVALID_PARAMETER;
+    }
+
+    /* The global sched_lock spin mutex was replaced by per-thread locks
+       (thread_lock) in FreeBSD 7.0, hence the version checks. */
+#if __FreeBSD_version < 700000
+    /* Do like they're doing in subr_ntoskrnl.c... */
+    mtx_lock_spin(&sched_lock);
+#else
+    thread_lock(curthread);
+#endif
+    sched_prio(curthread, iPriority);
+#if __FreeBSD_version < 600000
+    curthread->td_base_pri = iPriority;
+#endif
+#if __FreeBSD_version < 700000
+    mtx_unlock_spin(&sched_lock);
+#else
+    thread_unlock(curthread);
+#endif
+
+    return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+    /* There is nothing special that needs doing here, but the
+       user really better know what he's cooking. */
+    return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+    /** @todo fix RTThreadWait/RTR0Term race on freebsd. */
+    /* Sleep 1 ms to give the exiting thread a chance to get off the CPU. */
+    RTThreadSleep(1);
+}
+
+
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+    /* Nothing native to tear down on FreeBSD. */
+    NOREF(pThread);
+}
+
+
+/**
+ * Native thread main function.
+ *
+ * Runs the common IPRT thread body and then terminates the kernel
+ * process/thread with its status code.
+ *
+ * @param   pvThreadInt     The thread structure (PRTTHREADINT).
+ */
+static void rtThreadNativeMain(void *pvThreadInt)
+{
+    const struct thread *Self = curthread;
+    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
+    int rc;
+
+    rc = rtThreadMain(pThreadInt, (RTNATIVETHREAD)Self, &pThreadInt->szName[0]);
+
+    /* The kthread_* KPI was renamed kproc_* in FreeBSD 8.0 (800002). */
+#if __FreeBSD_version >= 800002
+    kproc_exit(rc);
+#else
+    kthread_exit(rc);
+#endif
+}
+
+
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+    int rc;
+    struct proc *pProc;
+
+    /* kthread_create was renamed kproc_create in FreeBSD 8.0 (800002);
+       both return 0 on success and an errno value on failure. */
+#if __FreeBSD_version >= 800002
+    rc = kproc_create(rtThreadNativeMain, pThreadInt, &pProc, RFHIGHPID, 0, "%s", pThreadInt->szName);
+#else
+    rc = kthread_create(rtThreadNativeMain, pThreadInt, &pProc, RFHIGHPID, 0, "%s", pThreadInt->szName);
+#endif
+    if (!rc)
+    {
+        /* The freshly created process has a single thread; use it as handle. */
+        *pNativeThread = (RTNATIVETHREAD)FIRST_THREAD_IN_PROC(pProc);
+        rc = VINF_SUCCESS;
+    }
+    else
+        rc = RTErrConvertFromErrno(rc);
+    return rc;
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/time-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/time-r0drv-freebsd.c
new file mode 100644
index 00000000..20720440
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/time-r0drv-freebsd.c
@@ -0,0 +1,74 @@
+/* $Id: time-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+#define RTTIME_INCL_TIMESPEC
+
+#include <iprt/time.h>
+
+
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+    /* Monotonic time since boot, in nanoseconds. */
+    struct timespec Ts;
+    nanouptime(&Ts);
+    return (uint64_t)Ts.tv_sec * RT_NS_1SEC_64 + (uint64_t)Ts.tv_nsec;
+}
+
+
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+    /* Derived from the nanosecond clock. */
+    uint64_t const cNs = RTTimeNanoTS();
+    return cNs / RT_NS_1MS;
+}
+
+
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+    /* Same source as RTTimeNanoTS on FreeBSD. */
+    return RTTimeNanoTS();
+}
+
+
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+    /* Same source as RTTimeMilliTS on FreeBSD. */
+    return RTTimeMilliTS();
+}
+
+
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+    /* Wall-clock time via nanotime(), converted to the IPRT format. */
+    struct timespec tsp;
+    nanotime(&tsp);
+    return RTTimeSpecSetTimespec(pTime, &tsp);
+}
+
diff --git a/src/VBox/Runtime/r0drv/freebsd/timer-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/timer-r0drv-freebsd.c
new file mode 100644
index 00000000..0dbca47a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/freebsd/timer-r0drv-freebsd.c
@@ -0,0 +1,286 @@
+/* $Id: timer-r0drv-freebsd.c $ */
+/** @file
+ * IPRT - Timer, Ring-0 Driver, FreeBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-freebsd-kernel.h"
+
+#include <iprt/timer.h>
+#include <iprt/time.h>
+#include <iprt/spinlock.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal representation of an FreeBSD timer handle.
+ */
+typedef struct RTTIMER
+{
+    /** Magic.
+     * This is RTTIMER_MAGIC, but changes to something else before the timer
+     * is destroyed to indicate clearly that thread should exit. */
+    uint32_t volatile u32Magic;
+    /** Flag indicating that the timer is suspended. */
+    uint8_t volatile fSuspended;
+    /** Whether the timer must run on a specific CPU or not. */
+    uint8_t fSpecificCpu;
+    /** The CPU it must run on if fSpecificCpu is set. */
+    uint32_t iCpu;
+    /** The FreeBSD callout structure. */
+    struct callout Callout;
+    /** Callback. */
+    PFNRTTIMER pfnTimer;
+    /** User argument. */
+    void *pvUser;
+    /** The timer interval. 0 if one-shot. */
+    uint64_t u64NanoInterval;
+    /** The start of the current run.
+     * This is used to calculate when the timer ought to fire the next time. */
+    uint64_t volatile u64StartTS;
+    /** The RTTimeNanoTS timestamp at which the timer is next expected to
+     * fire; updated by the callback when rescheduling. */
+    uint64_t volatile u64NextTS;
+    /** The current tick number (since u64StartTS). */
+    uint64_t volatile iTick;
+} RTTIMER;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void rtTimerFreeBSDCallback(void *pvTimer);
+
+
+
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+    *ppTimer = NULL;
+
+    /*
+     * Validate flags.
+     */
+    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
+        return VERR_INVALID_PARAMETER;
+    /* A specific-CPU request must name an existing CPU unless it is the
+       special all-CPUs value. */
+    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+        && (fFlags & RTTIMER_FLAGS_CPU_MASK) > mp_maxid)
+        return VERR_CPU_NOT_FOUND;
+
+    /*
+     * Allocate and initialize the timer handle.
+     */
+    PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer));
+    if (!pTimer)
+        return VERR_NO_MEMORY;
+
+    pTimer->u32Magic = RTTIMER_MAGIC;
+    pTimer->fSuspended = true; /* armed later by RTTimerStart */
+    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
+    pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK;
+    pTimer->pfnTimer = pfnTimer;
+    pTimer->pvUser = pvUser;
+    pTimer->u64NanoInterval = u64NanoInterval;
+    pTimer->u64StartTS = 0;
+    /* CALLOUT_MPSAFE: the callout callback runs without the Giant lock. */
+    callout_init(&pTimer->Callout, CALLOUT_MPSAFE);
+
+    *ppTimer = pTimer;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Validates the timer handle.
+ *
+ * @returns true if valid, false if invalid.
+ * @param   pTimer      The handle.
+ */
+DECLINLINE(bool) rtTimerIsValid(PRTTIMER pTimer)
+{
+    /* Pointer must be plausible and the magic still intact (RTTimerDestroy
+       bumps the magic before freeing). */
+    AssertReturn(VALID_PTR(pTimer), false);
+    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, false);
+    return true;
+}
+
+
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+    /* It's ok to pass NULL pointer. */
+    if (pTimer == /*NIL_RTTIMER*/ NULL)
+        return VINF_SUCCESS;
+    if (!rtTimerIsValid(pTimer))
+        return VERR_INVALID_HANDLE;
+
+    /*
+     * Free the associated resources.
+     */
+    /* Invalidate the magic first so rtTimerIsValid rejects the handle,
+       then stop any pending callout before freeing the memory. */
+    pTimer->u32Magic++;
+    callout_stop(&pTimer->Callout);
+    RTMemFree(pTimer);
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
+{
+    struct timeval tv;
+    uint64_t u64Now;
+
+    if (!rtTimerIsValid(pTimer))
+        return VERR_INVALID_HANDLE;
+    if (!pTimer->fSuspended)
+        return VERR_TIMER_ACTIVE;
+    if (   pTimer->fSpecificCpu
+        && !RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(pTimer->iCpu)))
+        return VERR_CPU_OFFLINE;
+
+    /*
+     * Calc when it should start firing.  Keep u64First as the *relative*
+     * delay for the callout - callout_reset() takes a tick count relative
+     * to now - and record the absolute timestamps for the rescheduling
+     * bookkeeping.  (Previously the absolute uptime was fed to tvtohz(),
+     * which delayed the first shot by roughly the system uptime.)
+     */
+    u64Now = RTTimeNanoTS();
+
+    pTimer->fSuspended = false;
+    pTimer->iTick = 0;
+    pTimer->u64StartTS = u64Now + u64First;
+    pTimer->u64NextTS = u64Now + u64First;
+
+    tv.tv_sec = u64First / 1000000000;
+    tv.tv_usec = (u64First % 1000000000) / 1000;
+    callout_reset(&pTimer->Callout, tvtohz(&tv), rtTimerFreeBSDCallback, pTimer);
+
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+    if (!rtTimerIsValid(pTimer))
+        return VERR_INVALID_HANDLE;
+    if (pTimer->fSuspended)
+        return VERR_TIMER_SUSPENDED;
+
+    /*
+     * Suspend the timer.
+     */
+    /* Mark it suspended first so the callback does not reschedule, then
+       cancel any pending callout. */
+    pTimer->fSuspended = true;
+    callout_stop(&pTimer->Callout);
+
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
+{
+    if (!rtTimerIsValid(pTimer))
+        return VERR_INVALID_HANDLE;
+    /* Changing the interval is not implemented by this backend. */
+    NOREF(u64NanoInterval);
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * smp_rendezvous action callback.
+ *
+ * This will perform the timer callback if we're on the right CPU.
+ *
+ * @param   pvTimer     The timer.
+ */
+static void rtTimerFreeBSDIpiAction(void *pvTimer)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvTimer;
+    /* This runs on every CPU (rendezvous); only the target CPU - or any CPU
+       when iCpu is the all-CPUs mask - invokes the user callback. */
+    if (   pTimer->iCpu == RTTIMER_FLAGS_CPU_MASK
+        || (u_int)pTimer->iCpu == curcpu)
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, pTimer->iTick);
+}
+
+
+/**
+ * The callout callback: reschedules periodic timers and dispatches the
+ * user callback on the requested CPU.
+ *
+ * @param   pvTimer     The timer (PRTTIMER).
+ */
+static void rtTimerFreeBSDCallback(void *pvTimer)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvTimer;
+
+    /* calculate and set the next timeout */
+    pTimer->iTick++;
+    if (!pTimer->u64NanoInterval)
+    {
+        /* One-shot timer: just mark it suspended. */
+        pTimer->fSuspended = true;
+        callout_stop(&pTimer->Callout);
+    }
+    else
+    {
+        struct timeval tv;
+        uint64_t cNsToNext;
+        const uint64_t u64NanoTS = RTTimeNanoTS();
+
+        /* Schedule relative to the start timestamp to avoid accumulating
+           drift; if we are lagging behind, fire again after half a tick. */
+        pTimer->u64NextTS = pTimer->u64StartTS + pTimer->iTick * pTimer->u64NanoInterval;
+        if (pTimer->u64NextTS < u64NanoTS)
+            pTimer->u64NextTS = u64NanoTS + RTTimerGetSystemGranularity() / 2;
+
+        /* callout_reset() takes ticks relative to now, so convert the delta
+           to the deadline rather than the absolute timestamp.  (Previously
+           the absolute uptime was converted, scheduling far too late.) */
+        cNsToNext = pTimer->u64NextTS - u64NanoTS;
+        tv.tv_sec = cNsToNext / 1000000000;
+        tv.tv_usec = (cNsToNext % 1000000000) / 1000;
+        callout_reset(&pTimer->Callout, tvtohz(&tv), rtTimerFreeBSDCallback, pTimer);
+    }
+
+    /* callback */
+    if (   !pTimer->fSpecificCpu
+        || pTimer->iCpu == curcpu)
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, pTimer->iTick);
+    else
+        smp_rendezvous(NULL, rtTimerFreeBSDIpiAction, NULL, pvTimer);
+}
+
+
+RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
+{
+    /* One scheduler clock tick, expressed in nanoseconds. */
+    return 1000000000 / hz;
+}
+
+
+RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
+{
+    /* Adjusting the tick granularity is not supported on FreeBSD. */
+    NOREF(u32Request);
+    NOREF(pu32Granted);
+    return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
+{
+    /* Nothing can be granted (see RTTimerRequestSystemGranularity). */
+    NOREF(u32Granted);
+    return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(bool) RTTimerCanDoHighResolution(void)
+{
+    /* Only tick-granular callout timers are implemented here. */
+    return false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/generic/Makefile.kup b/src/VBox/Runtime/r0drv/generic/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/generic/RTMpIsCpuWorkPending-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTMpIsCpuWorkPending-r0drv-generic.cpp
new file mode 100644
index 00000000..c3f155dc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTMpIsCpuWorkPending-r0drv-generic.cpp
@@ -0,0 +1,45 @@
+/* $Id: RTMpIsCpuWorkPending-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTMpIsCpuWorkPending, Generic.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/mp.h>
+#include "internal/iprt.h"
+
+
+/**
+ * Check if there's work (DPCs on Windows) pending on the current CPU.
+ *
+ * @return true if there's pending work on the current CPU, false otherwise.
+ */
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+    /* The generic implementation has no way of telling; report none. */
+    return false;
+}
+RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTMpOn-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTMpOn-r0drv-generic.cpp
new file mode 100644
index 00000000..d21f20d9
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTMpOn-r0drv-generic.cpp
@@ -0,0 +1,94 @@
+/* $Id: RTMpOn-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Generic Stubs.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/mp.h>
+#include "internal/iprt.h"
+
+#include <iprt/errcore.h>
+
+
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    /* Generic stub: cross-CPU calls are not available on this platform. */
+    NOREF(pfnWorker); NOREF(pvUser1); NOREF(pvUser2);
+    return VERR_NOT_SUPPORTED;
+}
+RT_EXPORT_SYMBOL(RTMpOnAll);
+
+
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+    /* RTMpOnAll is a stub here, so concurrent execution is moot. */
+    return false;
+}
+RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
+
+
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    /* Generic stub: cross-CPU calls are not available on this platform. */
+    NOREF(pfnWorker); NOREF(pvUser1); NOREF(pvUser2);
+    return VERR_NOT_SUPPORTED;
+}
+RT_EXPORT_SYMBOL(RTMpOnOthers);
+
+
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    /* Generic stub: cross-CPU calls are not available on this platform. */
+    NOREF(idCpu); NOREF(pfnWorker); NOREF(pvUser1); NOREF(pvUser2);
+    return VERR_NOT_SUPPORTED;
+}
+RT_EXPORT_SYMBOL(RTMpOnSpecific);
+
+
+RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    /* Generic stub: cross-CPU calls are not available on this platform. */
+    NOREF(idCpu1); NOREF(idCpu2); NOREF(fFlags);
+    NOREF(pfnWorker); NOREF(pvUser1); NOREF(pvUser2);
+    return VERR_NOT_SUPPORTED;
+}
+RT_EXPORT_SYMBOL(RTMpOnPair);
+
+
+
+RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
+{
+    /* RTMpOnPair is a stub here, so concurrent execution is moot. */
+    return false;
+}
+RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTMpPokeCpu-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTMpPokeCpu-r0drv-generic.cpp
new file mode 100644
index 00000000..b8d9c697
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTMpPokeCpu-r0drv-generic.cpp
@@ -0,0 +1,48 @@
+/* $Id: RTMpPokeCpu-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTMpPokeCpu, Generic Implementation.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/mp.h>
+#include "internal/iprt.h"
+
+
+/**
+ * No-op worker used to force a reschedule on the target CPU.
+ *
+ * Renamed from rtMpNtPokeCpuDummy: this is the generic implementation and
+ * the old "Nt" name was a copy/paste left-over from the NT backend.
+ */
+static DECLCALLBACK(void) rtMpGenericPokeCpuDummy(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    NOREF(idCpu);
+    NOREF(pvUser1);
+    NOREF(pvUser2);
+}
+
+
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+    /* Interrupt the target CPU by scheduling a no-op worker on it. */
+    return RTMpOnSpecific(idCpu, rtMpGenericPokeCpuDummy, NULL, NULL);
+}
+RT_EXPORT_SYMBOL(RTMpPokeCpu);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTThreadPreemptDisable-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptDisable-r0drv-generic.cpp
new file mode 100644
index 00000000..0c77e7ab
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptDisable-r0drv-generic.cpp
@@ -0,0 +1,44 @@
+/* $Id: RTThreadPreemptDisable-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTThreadPreemptDisable, Generic ring-0 driver implementation.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/thread.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+
+
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+    /* Generic stub: only records the state cookie; it does NOT actually
+       disable preemption on any platform. */
+    AssertPtr(pState);
+    Assert(pState->u32Reserved == 0);
+    pState->u32Reserved = 42; /* cookie checked by the matching restore */
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptDisable);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsEnabled-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsEnabled-r0drv-generic.cpp
new file mode 100644
index 00000000..43f46153
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsEnabled-r0drv-generic.cpp
@@ -0,0 +1,43 @@
+/* $Id: RTThreadPreemptIsEnabled-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTThreadPreemptIsEnabled, Generic ring-0 driver implementation.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/thread.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+
+
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+    /* Only the current thread is supported; the generic implementation
+       always claims preemption is enabled. */
+    Assert(hThread == NIL_RTTHREAD);
+    return true;
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptIsEnabled);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPending-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPending-r0drv-generic.cpp
new file mode 100644
index 00000000..87fc3c2b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPending-r0drv-generic.cpp
@@ -0,0 +1,43 @@
+/* $Id: RTThreadPreemptIsPending-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTThreadPreemptIsPending, Generic ring-0 driver implementation.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/thread.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+
+
RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
{
    /* Generic fallback: no way to ask the OS, so report that no preemption
       is pending.  Only the calling thread may be queried (NIL_RTTHREAD). */
    Assert(hThread == NIL_RTTHREAD);
    return false;
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPending);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPendingTrusty-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPendingTrusty-r0drv-generic.cpp
new file mode 100644
index 00000000..4ce8883e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptIsPendingTrusty-r0drv-generic.cpp
@@ -0,0 +1,41 @@
+/* $Id: RTThreadPreemptIsPendingTrusty-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTThreadPreemptIsPendingTrusty, Generic ring-0 driver implementation.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/thread.h>
+#include "internal/iprt.h"
+
+
RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
{
    /* no, RTThreadPreemptIsPending is not reliable in this generic
       implementation (it always returns false regardless of the real
       scheduler state). */
    return false;
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPendingTrusty);
+
diff --git a/src/VBox/Runtime/r0drv/generic/RTThreadPreemptRestore-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptRestore-r0drv-generic.cpp
new file mode 100644
index 00000000..e87289b4
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/RTThreadPreemptRestore-r0drv-generic.cpp
@@ -0,0 +1,44 @@
+/* $Id: RTThreadPreemptRestore-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - RTThreadPreemptRestore, Generic ring-0 driver implementation.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/thread.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+
+
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    /* 42 is presumably the sentinel written into u32Reserved by the matching
       generic RTThreadPreemptDisable (not visible in this chunk) — the assert
       catches restores without a prior disable.  TODO confirm against the
       disable implementation. */
    Assert(pState->u32Reserved == 42);
    pState->u32Reserved = 0;
}
RT_EXPORT_SYMBOL(RTThreadPreemptRestore);
+
diff --git a/src/VBox/Runtime/r0drv/generic/mpnotification-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/mpnotification-r0drv-generic.cpp
new file mode 100644
index 00000000..5b749c5e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/mpnotification-r0drv-generic.cpp
@@ -0,0 +1,65 @@
+/* $Id: mpnotification-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - Multiprocessor Notifications, Ring-0 Driver, Generic Stubs.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/mp.h>
+#include "internal/iprt.h"
+
+#include <iprt/errcore.h>
+#include "r0drv/mp-r0drv.h"
+
+
RTDECL(int) RTMpNotificationRegister(PFNRTMPNOTIFICATION pfnCallback, void *pvUser)
{
    /* Generic stub: CPU online/offline notifications are not delivered on
       this platform, so registration is a successful no-op. */
    NOREF(pfnCallback);
    NOREF(pvUser);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpNotificationRegister);
+
+
RTDECL(int) RTMpNotificationDeregister(PFNRTMPNOTIFICATION pfnCallback, void *pvUser)
{
    /* Generic stub: nothing was registered, so deregistration is a
       successful no-op. */
    NOREF(pfnCallback);
    NOREF(pvUser);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpNotificationDeregister);
+
+
/** Internal init hook for the MP notification subsystem — nothing to set up
 *  in the generic stub. */
DECLHIDDEN(int) rtR0MpNotificationInit(void)
{
    return VINF_SUCCESS;
}
+
+
/** Internal termination hook for the MP notification subsystem — nothing to
 *  tear down in the generic stub. */
DECLHIDDEN(void) rtR0MpNotificationTerm(void)
{
}
+
diff --git a/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c b/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c
new file mode 100644
index 00000000..139de6d7
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c
@@ -0,0 +1,503 @@
+/* $Id: semspinmutex-r0drv-generic.c $ */
+/** @file
+ * IPRT - Spinning Mutex Semaphores, Ring-0 Driver, Generic.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#ifdef RT_OS_WINDOWS
+# include "../nt/the-nt-kernel.h"
+#endif
+#include "internal/iprt.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * Saved state information.
 *
 * Filled in by rtSemSpinMutexEnter() and consumed by rtSemSpinMutexLeave();
 * the owner's copy is kept in RTSEMSPINMUTEXINTERNAL::SavedState until
 * release time.
 */
typedef struct RTSEMSPINMUTEXSTATE
{
    /** Saved flags register (only meaningful when fValidFlags is true). */
    RTCCUINTREG             fSavedFlags;
    /** Preemption state to restore on leave. */
    RTTHREADPREEMPTSTATE    PreemptState;
    /** Whether to spin or sleep while waiting for the mutex. */
    bool                    fSpin;
    /** Whether the flags have been saved (i.e. interrupts were disabled). */
    bool                    fValidFlags;
} RTSEMSPINMUTEXSTATE;
+
/**
 * Spinning mutex semaphore.
 */
typedef struct RTSEMSPINMUTEXINTERNAL
{
    /** Magic value (RTSEMSPINMUTEX_MAGIC).
     * RTSEMSPINMUTEX_MAGIC is the value of an initialized & operational
     * semaphore; RTSEMSPINMUTEX_MAGIC_DEAD marks a destroyed one. */
    uint32_t volatile       u32Magic;
    /** Flags. This is a combination of RTSEMSPINMUTEX_FLAGS_XXX and
     * RTSEMSPINMUTEX_INT_FLAGS_XXX. */
    uint32_t volatile       fFlags;
    /** The owner thread.
     * This is NIL if the semaphore is not owned by anyone. */
    RTNATIVETHREAD volatile hOwner;
    /** Number of threads that are fighting for the lock. */
    int32_t volatile        cLockers;
    /** The semaphore to block on when sleeping (non-spin waiters). */
    RTSEMEVENT              hEventSem;
    /** Saved state information of the owner.
     * This will be restored by RTSemSpinRelease. */
    RTSEMSPINMUTEXSTATE     SavedState;
} RTSEMSPINMUTEXINTERNAL;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/*#define RTSEMSPINMUTEX_INT_FLAGS_MUST*/
+
/** Validates the handle, returning if invalid.
 * Returns VERR_SEM_DESTROYED for a dead magic (destroyed semaphore) and
 * VERR_INVALID_HANDLE for anything else that isn't the live magic. */
#define RTSEMSPINMUTEX_VALIDATE_RETURN(pThis) \
    do \
    { \
        uint32_t u32Magic; \
        AssertPtr(pThis); \
        u32Magic = (pThis)->u32Magic; \
        if (u32Magic != RTSEMSPINMUTEX_MAGIC) \
        { \
            AssertMsgFailed(("u32Magic=%#x pThis=%p\n", u32Magic, pThis)); \
            return u32Magic == RTSEMSPINMUTEX_MAGIC_DEAD ? VERR_SEM_DESTROYED : VERR_INVALID_HANDLE; \
        } \
    } while (0)
+
+
RTDECL(int) RTSemSpinMutexCreate(PRTSEMSPINMUTEX phSpinMtx, uint32_t fFlags)
{
    RTSEMSPINMUTEXINTERNAL *pThis;
    int rc;

    AssertReturn(!(fFlags & ~RTSEMSPINMUTEX_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
    AssertPtr(phSpinMtx);

    /*
     * Allocate and initialize the structure.
     */
    pThis = (RTSEMSPINMUTEXINTERNAL *)RTMemAllocZ(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    pThis->u32Magic  = RTSEMSPINMUTEX_MAGIC;
    pThis->fFlags    = fFlags;
    pThis->hOwner    = NIL_RTNATIVETHREAD;
    pThis->cLockers  = 0;
    /* NO_LOCK_VAL: the event sem is an internal blocking primitive, so it is
       excluded from lock-order validation. */
    rc = RTSemEventCreateEx(&pThis->hEventSem, RTSEMEVENT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, NULL);
    if (RT_SUCCESS(rc))
    {
        *phSpinMtx = pThis;
        return VINF_SUCCESS;
    }

    /* Event creation failed: undo the allocation and pass the status on. */
    RTMemFree(pThis);
    return rc;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexCreate);
+
+
/**
 * Helper for RTSemSpinMutexTryRequest and RTSemSpinMutexRequest.
 *
 * This will check the current context, decide whether the caller must spin or
 * may sleep, disable preemption (and interrupts for IRQ-safe semaphores), and
 * record everything needed to undo this in @a pState.
 *
 * @returns VINF_SUCCESS, VINF_SEM_BAD_CONTEXT (interrupt context without
 *          RTSEMSPINMUTEX_FLAGS_IRQ_SAFE — try, but don't sleep) or
 *          VERR_SEM_BAD_CONTEXT.
 * @param   pState      Output structure receiving the saved state.
 * @param   pThis       The spin mutex (read for the IRQ-safe flag).
 */
static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis)
{
#ifndef RT_OS_WINDOWS
    RTTHREADPREEMPTSTATE const StateInit = RTTHREADPREEMPTSTATE_INITIALIZER;
#endif
    int rc = VINF_SUCCESS;

    /** @todo Later #1: When entering in interrupt context and we're not able to
     *        wake up threads from it, we could try switch the lock into pure
     *        spinlock mode. This would require that there are no other threads
     *        currently waiting on it and that the RTSEMSPINMUTEX_FLAGS_IRQ_SAFE
     *        flag is set.
     *
     *        Later #2: Similarly, it is possible to turn on the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE at run time if we manage to grab the
     *        semaphore ownership at interrupt time. We might want to try delay the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE even, since we're fine if we get it...
     */

#ifdef RT_OS_WINDOWS
    /*
     * NT: IRQL <= DISPATCH_LEVEL for waking up threads; IRQL < DISPATCH_LEVEL for sleeping.
     */
    pState->PreemptState.uchOldIrql = KeGetCurrentIrql();
    if (pState->PreemptState.uchOldIrql > DISPATCH_LEVEL)
        return VERR_SEM_BAD_CONTEXT;

    if (pState->PreemptState.uchOldIrql >= DISPATCH_LEVEL)
        pState->fSpin = true;
    else
    {
        pState->fSpin = false;
        /* Raising to DISPATCH_LEVEL doubles as preemption disable on NT. */
        KeRaiseIrql(DISPATCH_LEVEL, &pState->PreemptState.uchOldIrql);
        Assert(pState->PreemptState.uchOldIrql < DISPATCH_LEVEL);
    }

#elif defined(RT_OS_SOLARIS)
    /*
     * Solaris: RTSemEventSignal will do bad stuff on S10 if interrupts are disabled.
     */
    if (!ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#elif defined(RT_OS_LINUX) || defined(RT_OS_OS2)
    /*
     * OSes on which RTSemEventSignal can be called from any context.
     */
    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#else /* PORTME: Check for context where we cannot wake up threads. */
    /*
     * Default: ASSUME thread can be woken up if interrupts are enabled and
     *          we're not in an interrupt context.
     *          ASSUME that we can go to sleep if preemption is enabled.
     */
    if (    RTThreadIsInInterrupt(NIL_RTTHREAD)
        ||  !ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);
#endif

    /*
     * Disable interrupts if necessary.
     */
    pState->fValidFlags = !!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
    if (pState->fValidFlags)
        pState->fSavedFlags = ASMIntDisableFlags();
    else
        pState->fSavedFlags = 0;

    return rc;
}
+
+
/**
 * Helper for RTSemSpinMutexTryRequest, RTSemSpinMutexRequest and
 * RTSemSpinMutexRelease.
 *
 * Undoes rtSemSpinMutexEnter: restores the interrupt flag and then either
 * lowers the IRQL (NT) or re-enables preemption (everything else).
 *
 * @param   pState      The state saved by rtSemSpinMutexEnter.
 */
DECL_FORCE_INLINE(void) rtSemSpinMutexLeave(RTSEMSPINMUTEXSTATE *pState)
{
    /*
     * Restore the interrupt flag.
     */
    if (pState->fValidFlags)
        ASMSetFlags(pState->fSavedFlags);

#ifdef RT_OS_WINDOWS
    /*
     * NT: Lower the IRQL if we raised it.
     */
    if (pState->PreemptState.uchOldIrql < DISPATCH_LEVEL)
        KeLowerIrql(pState->PreemptState.uchOldIrql);
#else
    /*
     * Default: Restore preemption.
     */
    RTThreadPreemptRestore(&pState->PreemptState);
#endif
}
+
+
+RTDECL(int) RTSemSpinMutexTryRequest(RTSEMSPINMUTEX hSpinMtx)
+{
+ RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
+ RTNATIVETHREAD hSelf = RTThreadNativeSelf();
+ RTSEMSPINMUTEXSTATE State;
+ bool fRc;
+ int rc;
+
+ Assert(hSelf != NIL_RTNATIVETHREAD);
+ RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
+
+ /*
+ * Check context, disable preemption and save flags if necessary.
+ */
+ rc = rtSemSpinMutexEnter(&State, pThis);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Try take the ownership.
+ */
+ ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
+ if (!fRc)
+ {
+ /* Busy, too bad. Check for attempts at nested access. */
+ rc = VERR_SEM_BUSY;
+ if (RT_UNLIKELY(pThis->hOwner == hSelf))
+ {
+ AssertMsgFailed(("%p attempt at nested access\n"));
+ rc = VERR_SEM_NESTED;
+ }
+
+ rtSemSpinMutexLeave(&State);
+ return rc;
+ }
+
+ /*
+ * We're the semaphore owner.
+ */
+ ASMAtomicIncS32(&pThis->cLockers);
+ pThis->SavedState = State;
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemSpinMutexTryRequest);
+
+
+RTDECL(int) RTSemSpinMutexRequest(RTSEMSPINMUTEX hSpinMtx)
+{
+ RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
+ RTNATIVETHREAD hSelf = RTThreadNativeSelf();
+ RTSEMSPINMUTEXSTATE State;
+ bool fRc;
+ int rc;
+
+ Assert(hSelf != NIL_RTNATIVETHREAD);
+ RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
+
+ /*
+ * Check context, disable preemption and save flags if necessary.
+ */
+ rc = rtSemSpinMutexEnter(&State, pThis);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ /*
+ * Try take the ownership.
+ */
+ ASMAtomicIncS32(&pThis->cLockers);
+ ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
+ if (!fRc)
+ {
+ uint32_t cSpins;
+
+ /*
+ * It's busy. Check if it's an attempt at nested access.
+ */
+ if (RT_UNLIKELY(pThis->hOwner == hSelf))
+ {
+ AssertMsgFailed(("%p attempt at nested access\n"));
+ rtSemSpinMutexLeave(&State);
+ return VERR_SEM_NESTED;
+ }
+
+ /*
+ * Return if we're in interrupt context and the semaphore isn't
+ * configure to be interrupt safe.
+ */
+ if (rc == VINF_SEM_BAD_CONTEXT)
+ {
+ rtSemSpinMutexLeave(&State);
+ return VERR_SEM_BAD_CONTEXT;
+ }
+
+ /*
+ * Ok, we have to wait.
+ */
+ if (State.fSpin)
+ {
+ for (cSpins = 0; ; cSpins++)
+ {
+ ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
+ if (fRc)
+ break;
+ ASMNopPause();
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
+ {
+ rtSemSpinMutexLeave(&State);
+ return VERR_SEM_DESTROYED;
+ }
+
+ /*
+ * "Yield" once in a while. This may lower our IRQL/PIL which
+ * may preempting us, and it will certainly stop the hammering
+ * of hOwner for a little while.
+ */
+ if ((cSpins & 0x7f) == 0x1f)
+ {
+ rtSemSpinMutexLeave(&State);
+ rtSemSpinMutexEnter(&State, pThis);
+ Assert(State.fSpin);
+ }
+ }
+ }
+ else
+ {
+ for (cSpins = 0;; cSpins++)
+ {
+ ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
+ if (fRc)
+ break;
+ ASMNopPause();
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
+ {
+ rtSemSpinMutexLeave(&State);
+ return VERR_SEM_DESTROYED;
+ }
+
+ if ((cSpins & 15) == 15) /* spin a bit before going sleep (again). */
+ {
+ rtSemSpinMutexLeave(&State);
+
+ rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT);
+ ASMCompilerBarrier();
+ if (RT_SUCCESS(rc))
+ AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED);
+ else if (rc == VERR_INTERRUPTED)
+ AssertRC(rc); /* shouldn't happen */
+ else
+ {
+ AssertRC(rc);
+ return rc;
+ }
+
+ rc = rtSemSpinMutexEnter(&State, pThis);
+ AssertRCReturn(rc, rc);
+ Assert(!State.fSpin);
+ }
+ }
+ }
+ }
+
+ /*
+ * We're the semaphore owner.
+ */
+ pThis->SavedState = State;
+ Assert(pThis->hOwner == hSelf);
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemSpinMutexRequest);
+
+
+RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
+{
+ RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
+ RTNATIVETHREAD hSelf = RTThreadNativeSelf();
+ uint32_t cLockers;
+ RTSEMSPINMUTEXSTATE State;
+ bool fRc;
+
+ Assert(hSelf != NIL_RTNATIVETHREAD);
+ RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
+
+ /*
+ * Get the saved state and try release the semaphore.
+ */
+ State = pThis->SavedState;
+ ASMCompilerBarrier();
+ ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
+ AssertMsgReturn(fRc,
+ ("hOwner=%p hSelf=%p cLockers=%d\n", pThis->hOwner, hSelf, pThis->cLockers),
+ VERR_NOT_OWNER);
+
+ cLockers = ASMAtomicDecS32(&pThis->cLockers);
+ rtSemSpinMutexLeave(&State);
+ if (cLockers > 0)
+ {
+ int rc = RTSemEventSignal(pThis->hEventSem);
+ AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
+ }
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemSpinMutexRelease);
+
+
RTDECL(int) RTSemSpinMutexDestroy(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis;
    RTSEMEVENT              hEventSem;
    int                     rc;

    /* NIL handle is a successful no-op by convention. */
    if (hSpinMtx == NIL_RTSEMSPINMUTEX)
        return VINF_SUCCESS;
    pThis = hSpinMtx;
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /* No destruction races allowed! */
    AssertMsg(   pThis->cLockers  == 0
              && pThis->hOwner    == NIL_RTNATIVETHREAD,
              ("pThis=%p cLockers=%d hOwner=%p\n", pThis, pThis->cLockers, pThis->hOwner));

    /*
     * Invalidate the structure, free the mutex and free the structure.
     * The dead magic is written first so concurrent waiters spinning on
     * u32Magic bail out with VERR_SEM_DESTROYED.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMSPINMUTEX_MAGIC_DEAD);
    hEventSem = pThis->hEventSem;
    pThis->hEventSem = NIL_RTSEMEVENT;
    rc = RTSemEventDestroy(hEventSem); AssertRC(rc);

    RTMemFree(pThis);
    return rc;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexDestroy);
+
diff --git a/src/VBox/Runtime/r0drv/generic/threadctxhooks-r0drv-generic.cpp b/src/VBox/Runtime/r0drv/generic/threadctxhooks-r0drv-generic.cpp
new file mode 100644
index 00000000..9b19ed92
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/generic/threadctxhooks-r0drv-generic.cpp
@@ -0,0 +1,73 @@
+/* $Id: threadctxhooks-r0drv-generic.cpp $ */
+/** @file
+ * IPRT - Thread Context Switching Hook, Ring-0 Driver, Generic.
+ */
+
+/*
+ * Copyright (C) 2013-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+
+#include "internal/iprt.h"
+
RTDECL(int) RTThreadCtxHookCreate(PRTTHREADCTXHOOK phCtxHook, uint32_t fFlags, PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
{
    /* Generic stub: context-switch hooks need OS scheduler support, which
       this platform doesn't provide. */
    RT_NOREF4(phCtxHook, fFlags, pfnCallback, pvUser);
    return VERR_NOT_SUPPORTED;
}
RT_EXPORT_SYMBOL(RTThreadCtxHookCreate);
+
+
RTDECL(int) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
{
    /* Only the NIL handle can be "destroyed" since creation always fails. */
    return hCtxHook == NIL_RTTHREADCTXHOOK ? VINF_SUCCESS : VERR_INVALID_HANDLE;
}
RT_EXPORT_SYMBOL(RTThreadCtxHookDestroy);
+
+
RTDECL(int) RTThreadCtxHookEnable(RTTHREADCTXHOOK hCtxHook)
{
    /* Generic stub: hooks cannot be created, so there is nothing to enable. */
    NOREF(hCtxHook);
    return VERR_NOT_SUPPORTED;
}
RT_EXPORT_SYMBOL(RTThreadCtxHookEnable);
+
+
RTDECL(int) RTThreadCtxHookDisable(RTTHREADCTXHOOK hCtxHook)
{
    /* Generic stub: hooks cannot be created, so there is nothing to disable. */
    NOREF(hCtxHook);
    return VERR_NOT_SUPPORTED;
}
RT_EXPORT_SYMBOL(RTThreadCtxHookDisable);
+
+
RTDECL(bool) RTThreadCtxHookIsEnabled(RTTHREADCTXHOOK hCtxHook)
{
    /* Generic stub: no hook can ever be enabled on this platform. */
    NOREF(hCtxHook);
    return false;
}
RT_EXPORT_SYMBOL(RTThreadCtxHookIsEnabled);
+
diff --git a/src/VBox/Runtime/r0drv/haiku/Makefile.kup b/src/VBox/Runtime/r0drv/haiku/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/haiku/RTLogWriteDebugger-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/RTLogWriteDebugger-r0drv-haiku.c
new file mode 100644
index 00000000..6313e726
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/RTLogWriteDebugger-r0drv-haiku.c
@@ -0,0 +1,42 @@
+/* $Id: RTLogWriteDebugger-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Log To Debugger, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+
RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
{
    /** @todo implement this — currently a no-op; the intended call is
     *        commented out below. */
    /*kprintf("%.*s", (int)cb, pch);*/
    return;
}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/RTLogWriteStdOut-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/RTLogWriteStdOut-r0drv-haiku.c
new file mode 100644
index 00000000..c53065ce
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/RTLogWriteStdOut-r0drv-haiku.c
@@ -0,0 +1,41 @@
+/* $Id: RTLogWriteStdOut-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Log To StdOut, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+
RTDECL(void) RTLogWriteStdOut(const char *pch, size_t cb)
{
    /* Haiku kernel debug output; pch need not be NUL terminated, hence the
       explicit precision.  Note the int cast truncates cb above INT_MAX,
       which log writes never approach in practice. */
    dprintf("%.*s", (int)cb, pch);
    return;
}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/alloc-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/alloc-r0drv-haiku.c
new file mode 100644
index 00000000..0c3942bc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/alloc-r0drv-haiku.c
@@ -0,0 +1,124 @@
+/* $Id: alloc-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+#include <iprt/log.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/thread.h>
+#include "r0drv/alloc-r0drv.h"
+
+
/**
 * OS specific allocation function.
 *
 * Allocates cb bytes plus a RTMEMHDR header via the kernel heap.  Any-context
 * allocation is refused since Haiku's malloc may block.
 *
 * NOTE(review): cb + sizeof(*pHdr) is not checked for overflow here —
 * presumably the generic alloc-r0drv.cpp wrapper validates sizes first;
 * confirm before relying on this with untrusted sizes.
 */
int rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    if (RT_UNLIKELY(fFlags & RTMEMHDR_FLAG_ANY_CTX))
        return VERR_NOT_SUPPORTED;

    PRTMEMHDR pHdr = (PRTMEMHDR)malloc(cb + sizeof(*pHdr));
    if (RT_UNLIKELY(!pHdr))
    {
        LogRel(("rtR0MemAllocEx(%u, %#x) failed\n",(unsigned)cb + sizeof(*pHdr), fFlags));
        return VERR_NO_MEMORY;
    }

    /* Fill in the header; the user block follows immediately after it. */
    pHdr->u32Magic   = RTMEMHDR_MAGIC;
    pHdr->fFlags     = fFlags;
    pHdr->cb         = cb;
    pHdr->cbReq      = cb;
    *ppHdr = pHdr;
    return VINF_SUCCESS;
}
+
+
/**
 * OS specific free function.
 *
 * Invalidates the header magic before freeing so stale pointers are caught
 * by the magic check on subsequent use.
 */
void rtR0MemFree(PRTMEMHDR pHdr)
{
    pHdr->u32Magic += 1;
    free(pHdr);
}
+
+
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb) RT_NO_THROW_DEF
{
    /*
     * Validate input.
     */
    AssertPtr(pPhys);
    Assert(cb > 0);
    RT_ASSERT_PREEMPTIBLE();

    /*
     * Allocate the memory and ensure that the API is still providing
     * memory that's always below 4GB.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    void *pv;
    /* B_32_BIT_CONTIGUOUS requests physically contiguous pages below 4GB. */
    area_id area = create_area("VirtualBox Contig Alloc", &pv, B_ANY_KERNEL_ADDRESS, cb, B_32_BIT_CONTIGUOUS,
                               B_READ_AREA | B_WRITE_AREA);
    if (area >= 0)
    {
        /* NOTE(review): only physMap[0].address is used — assumes the
           contiguous area maps to a single physical run; two entries are
           queried but the second is never checked.  Confirm against the
           Haiku get_memory_map contract. */
        physical_entry physMap[2];
        if (get_memory_map(pv, cb, physMap, 2)>= B_OK)
        {
            *pPhys = physMap[0].address;
            return pv;
        }
        delete_area(area);
        AssertMsgFailed(("Cannot get_memory_map for contig alloc! cb=%u\n",(unsigned)cb));
    }
    else
        AssertMsgFailed(("Cannot create_area for contig alloc! cb=%u error=0x%08lx\n",(unsigned)cb, area));
    return NULL;
}
+
+
RTR0DECL(void) RTMemContFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    RT_ASSERT_PREEMPTIBLE();
    /* NULL is a permitted no-op, matching RTMemContAlloc's failure return. */
    if (pv)
    {
        Assert(cb > 0);

        /* Look the area up from the base address; cb is only used for the
           failure message. */
        area_id area = area_for(pv);
        if (area >= B_OK)
            delete_area(area);
        else
            AssertMsgFailed(("Cannot find area to delete! cb=%u error=0x%08lx\n",(unsigned)cb, area));
    }
}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/assert-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/assert-r0drv-haiku.c
new file mode 100644
index 00000000..376530d2
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/assert-r0drv-haiku.c
@@ -0,0 +1,68 @@
+/* $Id: assert-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/assert.h>
+
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include <iprt/stdarg.h>
+#include <iprt/string.h>
+
+#include "internal/assert.h"
+
+
+/**
+ * Native assertion worker: print the first (location) message line to the
+ * kernel debug output.
+ *
+ * @param   pszExpr     The failed expression.
+ * @param   uLine       Source line number.
+ * @param   pszFile     Source file name.
+ * @param   pszFunction Function name.
+ */
+void rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+    /* %u matches the unsigned uLine parameter (was %d). */
+    dprintf("\r\n!!Assertion Failed!!\r\n"
+            "Expression: %s\r\n"
+            "Location : %s(%u) %s\r\n",
+            pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+/**
+ * Native assertion worker: format and print the custom assertion message.
+ *
+ * @param   fInitial    Whether this is the initial invocation; not used here.
+ * @param   pszFormat   printf-style format string.
+ * @param   va          Format arguments.
+ */
+void rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+    char szBuf[256];
+    NOREF(fInitial);
+
+    /* Format into a bounded buffer and force termination before printing. */
+    RTStrPrintfV(szBuf, sizeof(szBuf) - 1, pszFormat, va);
+    szBuf[sizeof(szBuf) - 1] = '\0';
+    dprintf("%s", szBuf);
+}
+
+
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+    /* Hand the two buffered assertion message parts to the Haiku kernel debugger. */
+    panic("%s%s", g_szRTAssertMsg1, g_szRTAssertMsg2);
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/initterm-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/initterm-r0drv-haiku.c
new file mode 100644
index 00000000..b86d64e1
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/initterm-r0drv-haiku.c
@@ -0,0 +1,48 @@
+/* $Id: initterm-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Initialization & Termination, R0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+#include "internal/initterm.h"
+
+
+/* No OS specific initialization is required on Haiku. */
+int rtR0InitNative(void)
+{
+    return VINF_SUCCESS;
+}
+
+
+/* Nothing to undo; see rtR0InitNative. */
+void rtR0TermNative(void)
+{
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/memobj-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/memobj-r0drv-haiku.c
new file mode 100644
index 00000000..0f7889e3
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/memobj-r0drv-haiku.c
@@ -0,0 +1,664 @@
+/* $Id: memobj-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+
+#include <iprt/memobj.h>
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The Haiku version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJHAIKU
+{
+    /** The core structure. */
+    RTR0MEMOBJINTERNAL Core;
+    /** Area identifier; -1 when the object has no backing area
+     *  (physical-range and lock objects). */
+    area_id AreaId;
+} RTR0MEMOBJHAIKU, *PRTR0MEMOBJHAIKU;
+
+
+//MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
+#if 0
+/**
+ * Gets the virtual memory map the specified object is mapped into.
+ *
+ * @returns VM map handle on success, NULL if no map.
+ * @param pMem The memory object.
+ */
+static vm_map_t rtR0MemObjHaikuGetMap(PRTR0MEMOBJINTERNAL pMem)
+{
+ switch (pMem->enmType)
+ {
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_CONT:
+ return kernel_map;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ return NULL; /* pretend these have no mapping atm. */
+
+ case RTR0MEMOBJTYPE_LOCK:
+ return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
+ ? kernel_map
+ : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
+ ? kernel_map
+ : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;
+
+ case RTR0MEMOBJTYPE_MAPPING:
+ return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
+ ? kernel_map
+ : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;
+
+ default:
+ return NULL;
+ }
+}
+#endif
+
+
+/**
+ * Release the native resources of a memory object.
+ *
+ * NOTE(review): delete/unlock failures are only asserted on; the function
+ * still returns VINF_SUCCESS in that case.
+ */
+int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)pMem;
+    int rc = B_OK;
+
+    switch (pMemHaiku->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+        case RTR0MEMOBJTYPE_MAPPING:
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            /* AreaId is -1 for objects without a backing area (e.g. EnterPhys). */
+            if (pMemHaiku->AreaId > -1)
+                rc = delete_area(pMemHaiku->AreaId);
+
+            AssertMsg(rc == B_OK, ("%#x", rc));
+            break;
+        }
+
+        case RTR0MEMOBJTYPE_LOCK:
+        {
+            team_id team = B_SYSTEM_TEAM;
+
+            /* User locks were taken in the owning team's address space. */
+            if (pMemHaiku->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+                team = ((team_id)pMemHaiku->Core.u.Lock.R0Process);
+
+            /* B_READ_DEVICE must match the flag used by rtR0MemObjNativeLockInMap. */
+            rc = unlock_memory_etc(team, pMemHaiku->Core.pv, pMemHaiku->Core.cb, B_READ_DEVICE);
+            AssertMsg(rc == B_OK, ("%#x", rc));
+            break;
+        }
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+        {
+            team_id team = B_SYSTEM_TEAM;
+            /* NOTE(review): reads u.Lock rather than u.ResVirt -- same union slot
+               is presumed; confirm against RTR0MEMOBJINTERNAL layout. */
+            if (pMemHaiku->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+                team = ((team_id)pMemHaiku->Core.u.Lock.R0Process);
+
+            rc = vm_unreserve_address_range(team, pMemHaiku->Core.pv, pMemHaiku->Core.cb);
+            AssertMsg(rc == B_OK, ("%#x", rc));
+            break;
+        }
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", pMemHaiku->Core.enmType));
+            return VERR_INTERNAL_ERROR;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker allocating a Haiku area for the various kernel allocation types.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem        Where to return the new memory object.
+ * @param   cb           Size in bytes.
+ * @param   fExecutable  Ignored on Haiku.
+ * @param   type         The RTR0MEMOBJTYPE_XXX to create.
+ * @param   PhysHighest  Physical address limit (PHYS/PHYS_NC only).
+ * @param   uAlignment   Requested alignment (only PAGE_SIZE supported for PHYS).
+ */
+static int rtR0MemObjNativeAllocArea(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
+                                     bool fExecutable, RTR0MEMOBJTYPE type, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    NOREF(fExecutable);
+
+    int rc;
+    void *pvMap = NULL;
+    const char *pszName = NULL;
+    uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
+    uint32 fLock = ~0U;
+    LogFlowFunc(("ppMem=%p cb=%u, fExecutable=%s, type=%08x, PhysHighest=%RX64 uAlignment=%u\n", ppMem,(unsigned)cb,
+                 fExecutable ? "true" : "false", type, PhysHighest,(unsigned)uAlignment));
+
+    /* Map the object type to an area name and Haiku locking mode. */
+    switch (type)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+            pszName = "IPRT R0MemObj Alloc";
+            fLock = B_FULL_LOCK;
+            break;
+        case RTR0MEMOBJTYPE_LOW:
+            pszName = "IPRT R0MemObj AllocLow";
+            fLock = B_32_BIT_FULL_LOCK;
+            break;
+        case RTR0MEMOBJTYPE_CONT:
+            pszName = "IPRT R0MemObj AllocCont";
+            fLock = B_32_BIT_CONTIGUOUS;
+            break;
+#if 0
+        case RTR0MEMOBJTYPE_MAPPING:
+            pszName = "IPRT R0MemObj Mapping";
+            fLock = B_FULL_LOCK;
+            break;
+#endif
+        case RTR0MEMOBJTYPE_PHYS:
+            /** @todo alignment */
+            if (uAlignment != PAGE_SIZE)
+                return VERR_NOT_SUPPORTED;
+            /** @todo r=ramshankar: no 'break' here?? */
+            /* NOTE(review): falls through into PHYS_NC intentionally or not -- confirm. */
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            pszName = "IPRT R0MemObj AllocPhys";
+            fLock = (PhysHighest < _4G ? B_LOMEM : B_32_BIT_CONTIGUOUS);
+            break;
+#if 0
+        case RTR0MEMOBJTYPE_LOCK:
+            break;
+#endif
+        default:
+            return VERR_INTERNAL_ERROR;
+    }
+
+    /* Create the object. */
+    PRTR0MEMOBJHAIKU pMemHaiku;
+    pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), type, NULL, cb);
+    if (RT_UNLIKELY(!pMemHaiku))
+        return VERR_NO_MEMORY;
+
+    /* rc holds the area id here (>= 0 on success), later a status_t from get_memory_map. */
+    rc = pMemHaiku->AreaId = create_area(pszName, &pvMap, addressSpec, cb, fLock, B_READ_AREA | B_WRITE_AREA);
+    if (pMemHaiku->AreaId >= 0)
+    {
+        physical_entry physMap[2];
+        pMemHaiku->Core.pv = pvMap; /* store start address */
+        switch (type)
+        {
+            case RTR0MEMOBJTYPE_CONT:
+                /* Record the physical base of the contiguous range. */
+                rc = get_memory_map(pvMap, cb, physMap, 2);
+                if (rc == B_OK)
+                    pMemHaiku->Core.u.Cont.Phys = physMap[0].address;
+                break;
+
+            case RTR0MEMOBJTYPE_PHYS:
+            case RTR0MEMOBJTYPE_PHYS_NC:
+                rc = get_memory_map(pvMap, cb, physMap, 2);
+                if (rc == B_OK)
+                {
+                    pMemHaiku->Core.u.Phys.PhysBase = physMap[0].address;
+                    pMemHaiku->Core.u.Phys.fAllocated = true;
+                }
+                break;
+
+            default:
+                break;
+        }
+        if (rc >= B_OK)
+        {
+            *ppMem = &pMemHaiku->Core;
+            return VINF_SUCCESS;
+        }
+
+        /* get_memory_map failed: release the area again. */
+        delete_area(pMemHaiku->AreaId);
+    }
+
+    rtR0MemObjDelete(&pMemHaiku->Core);
+    return RTErrConvertFromHaikuKernReturn(rc);
+}
+
+
+/* Fully-locked (B_FULL_LOCK) kernel allocation; see rtR0MemObjNativeAllocArea. */
+int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    return rtR0MemObjNativeAllocArea(ppMem, cb, fExecutable, RTR0MEMOBJTYPE_PAGE, 0 /* PhysHighest */, 0 /* uAlignment */);
+}
+
+
+/* Below-4GB allocation (B_32_BIT_FULL_LOCK); see rtR0MemObjNativeAllocArea. */
+int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    return rtR0MemObjNativeAllocArea(ppMem, cb, fExecutable, RTR0MEMOBJTYPE_LOW, 0 /* PhysHighest */, 0 /* uAlignment */);
+}
+
+
+/* Physically contiguous below-4GB allocation; see rtR0MemObjNativeAllocArea. */
+int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    return rtR0MemObjNativeAllocArea(ppMem, cb, fExecutable, RTR0MEMOBJTYPE_CONT, 0 /* PhysHighest */, 0 /* uAlignment */);
+}
+
+/* Physical allocation; the worker rejects any uAlignment other than PAGE_SIZE. */
+int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    return rtR0MemObjNativeAllocArea(ppMem, cb, false, RTR0MEMOBJTYPE_PHYS, PhysHighest, uAlignment);
+}
+
+
+/* Non-contiguous physical allocation delegates to AllocPhys with page alignment. */
+int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
+}
+
+
+/**
+ * Create a memory object describing a pre-existing physical address range.
+ * Nothing is allocated or mapped here; the range must be mapped later.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem         Where to return the new memory object.
+ * @param   Phys          Start of the physical range.
+ * @param   cb            Size of the range in bytes.
+ * @param   uCachePolicy  Only RTMEM_CACHE_POLICY_DONT_CARE is supported.
+ */
+int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
+    /* %RHp is the IPRT specifier for RTHCPHYS; the previous plain %08x
+       mismatched the 64-bit argument in the varargs call. */
+    LogFlowFunc(("ppMem=%p Phys=%RHp cb=%u uCachePolicy=%x\n", ppMem, Phys, (unsigned)cb, uCachePolicy));
+
+    /* Create the object. */
+    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (!pMemHaiku)
+        return VERR_NO_MEMORY;
+
+    /* There is no allocation here, it needs to be mapped somewhere first. */
+    pMemHaiku->AreaId = -1;
+    pMemHaiku->Core.u.Phys.fAllocated = false;
+    pMemHaiku->Core.u.Phys.PhysBase = Phys;
+    pMemHaiku->Core.u.Phys.uCachePolicy = uCachePolicy;
+    *ppMem = &pMemHaiku->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker locking the memory in either kernel or user maps.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem        Where to store the allocated memory object.
+ * @param   pvStart      The starting address.
+ * @param   cb           The size of the block.
+ * @param   fAccess      The mapping protection to apply.
+ * @param   R0Process    The process to map the memory to (use NIL_RTR0PROCESS
+ *                       for the kernel)
+ * @param   fFlags       Memory flags (B_READ_DEVICE indicates the memory is
+ *                       intended to be written from a "device").
+ */
+static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvStart, size_t cb, uint32_t fAccess,
+                                     RTR0PROCESS R0Process, int fFlags)
+{
+    NOREF(fAccess);
+    int rc;
+    team_id TeamId = B_SYSTEM_TEAM;
+
+    /* Cast cb (size_t) and R0Process to match %u / %d -- the uncast varargs
+       were undefined behavior on LP64 targets. */
+    LogFlowFunc(("ppMem=%p pvStart=%p cb=%u fAccess=%x R0Process=%d fFlags=%x\n", ppMem, pvStart, (unsigned)cb, fAccess,
+                 (int)R0Process, fFlags));
+
+    /* Create the object. */
+    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_LOCK, pvStart, cb);
+    if (RT_UNLIKELY(!pMemHaiku))
+        return VERR_NO_MEMORY;
+
+    if (R0Process != NIL_RTR0PROCESS)
+        TeamId = (team_id)R0Process;
+    rc = lock_memory_etc(TeamId, pvStart, cb, fFlags);
+    if (rc == B_OK)
+    {
+        /* No backing area for lock objects. */
+        pMemHaiku->AreaId = -1;
+        pMemHaiku->Core.u.Lock.R0Process = R0Process;
+        *ppMem = &pMemHaiku->Core;
+        return VINF_SUCCESS;
+    }
+    rtR0MemObjDelete(&pMemHaiku->Core);
+    return RTErrConvertFromHaikuKernReturn(rc);
+}
+
+
+/* Lock user memory in the given process; B_READ_DEVICE matches the unlock in rtR0MemObjNativeFree. */
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+    return rtR0MemObjNativeLockInMap(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, B_READ_DEVICE);
+}
+
+
+/* Lock kernel memory (NIL_RTR0PROCESS selects B_SYSTEM_TEAM in the worker). */
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+    return rtR0MemObjNativeLockInMap(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, B_READ_DEVICE);
+}
+
+
+#if 0
+/** @todo Reserve address space */
+/**
+ * Worker for the two virtual address space reservers.
+ *
+ * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
+ */
+static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
+ RTR0PROCESS R0Process)
+{
+ int rc;
+ team_id TeamId = B_SYSTEM_TEAM;
+
+ LogFlowFunc(("ppMem=%p pvFixed=%p cb=%u uAlignment=%u R0Process=%d\n", ppMem, pvFixed, (unsigned)cb, uAlignment, R0Process));
+
+ if (R0Process != NIL_RTR0PROCESS)
+ team = (team_id)R0Process;
+
+ /* Check that the specified alignment is supported. */
+ if (uAlignment > PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ /* Create the object. */
+ PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
+ if (!pMemHaiku)
+ return VERR_NO_MEMORY;
+
+ /* Ask the kernel to reserve the address range. */
+ //XXX: vm_reserve_address_range ?
+ return VERR_NOT_SUPPORTED;
+}
+#endif
+
+
+/* Address-space reservation is not implemented on Haiku yet (see the disabled worker above). */
+int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/* User address-space reservation is not implemented on Haiku yet. */
+int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Map a memory object into kernel space, either by cloning its backing area
+ * or by mapping its physical range.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the new mapping object.
+ * @param   pMemToMap   The object to map.
+ * @param   pvFixed     Must be (void *)-1 (no fixed-address support).
+ * @param   uAlignment  Must be <= PAGE_SIZE.
+ * @param   fProt       RTMEM_PROT_XXX protection for the mapping.
+ * @param   offSub      Must be 0 (sub-range mapping unsupported).
+ * @param   cbSub       Must be 0 (sub-range mapping unsupported).
+ */
+int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+                              unsigned fProt, size_t offSub, size_t cbSub)
+{
+    PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap;
+    PRTR0MEMOBJHAIKU pMemHaiku;
+    area_id area = -1;
+    void *pvMap = pvFixed;
+    uint32 uAddrSpec = B_EXACT_ADDRESS;
+    uint32 fProtect = 0;
+    int rc = VERR_MAP_FAILED;
+    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
+    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+
+    /* Check that the specified alignment is supported. */
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    /* We can't map anything to the first page, sorry. */
+    if (pvFixed == 0)
+        return VERR_NOT_SUPPORTED;
+
+    if (fProt & RTMEM_PROT_READ)
+        fProtect |= B_KERNEL_READ_AREA;
+    if (fProt & RTMEM_PROT_WRITE)
+        fProtect |= B_KERNEL_WRITE_AREA;
+
+    /*
+     * Either the object we map has an area associated with, which we can clone,
+     * or it's a physical address range which we must map.
+     */
+    if (pMemToMapHaiku->AreaId > -1)
+    {
+        if (pvFixed == (void *)-1)
+            uAddrSpec = B_ANY_KERNEL_ADDRESS;
+
+        rc = area = clone_area("IPRT R0MemObj MapKernel", &pvMap, uAddrSpec, fProtect, pMemToMapHaiku->AreaId);
+        LogFlow(("rtR0MemObjNativeMapKernel: clone_area uAddrSpec=%d fProtect=%x AreaId=%d rc=%d\n", uAddrSpec, fProtect,
+                 pMemToMapHaiku->AreaId, rc));
+    }
+    else if (pMemToMapHaiku->Core.enmType == RTR0MEMOBJTYPE_PHYS)
+    {
+        /* map_physical_memory() won't let you choose where. */
+        if (pvFixed != (void *)-1)
+            return VERR_NOT_SUPPORTED;
+        uAddrSpec = B_ANY_KERNEL_ADDRESS;
+
+        rc = area = map_physical_memory("IPRT R0MemObj MapKernelPhys", (phys_addr_t)pMemToMapHaiku->Core.u.Phys.PhysBase,
+                                        pMemToMapHaiku->Core.cb, uAddrSpec, fProtect, &pvMap);
+    }
+    else
+        return VERR_NOT_SUPPORTED;
+
+    if (rc >= B_OK)
+    {
+        /* Create the object. */
+        pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), RTR0MEMOBJTYPE_MAPPING, pvMap,
+                                                    pMemToMapHaiku->Core.cb);
+        if (RT_UNLIKELY(!pMemHaiku))
+        {
+            /* Undo the clone/mapping so the area isn't leaked (previously leaked here). */
+            delete_area(area);
+            return VERR_NO_MEMORY;
+        }
+
+        pMemHaiku->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+        pMemHaiku->Core.pv = pvMap;
+        pMemHaiku->AreaId = area;
+        *ppMem = &pMemHaiku->Core;
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Failure: no memory object was created, so there is nothing to delete.
+     * (The original code called rtR0MemObjDelete() on the uninitialized
+     * pMemHaiku pointer here -- undefined behavior.)
+     */
+    return VERR_MAP_FAILED;
+}
+
+
+/**
+ * Map a memory object into a user process -- not implemented on Haiku.
+ * The #if 0 body below is FreeBSD-derived scaffolding kept as a porting
+ * reference; the active implementation only returns VERR_NOT_SUPPORTED.
+ */
+int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
+                            unsigned fProt, RTR0PROCESS R0Process)
+{
+#if 0
+    /*
+     * Check for unsupported stuff.
+     */
+    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    int rc;
+    PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap;
+    struct proc *pProc = (struct proc *)R0Process;
+    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
+
+    /* calc protection */
+    vm_prot_t ProtectionFlags = 0;
+    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
+        ProtectionFlags = VM_PROT_NONE;
+    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
+        ProtectionFlags |= VM_PROT_READ;
+    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
+        ProtectionFlags |= VM_PROT_WRITE;
+    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
+        ProtectionFlags |= VM_PROT_EXECUTE;
+
+    /* calc mapping address */
+    PROC_LOCK(pProc);
+    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
+    PROC_UNLOCK(pProc);
+
+    /* Insert the object in the map. */
+    rc = vm_map_find(pProcMap,              /* Map to insert the object in */
+                     NULL,                  /* Object to map */
+                     0,                     /* Start offset in the object */
+                     &AddrR3,               /* Start address IN/OUT */
+                     pMemToMap->cb,         /* Size of the mapping */
+                     TRUE,                  /* Whether a suitable address should be searched for first */
+                     ProtectionFlags,       /* protection flags */
+                     VM_PROT_ALL,           /* Maximum protection flags */
+                     0);                    /* Copy on write */
+
+    /* Map the memory page by page into the destination map. */
+    if (rc == KERN_SUCCESS)
+    {
+        size_t cPages = pMemToMap->cb >> PAGE_SHIFT;;
+        pmap_t pPhysicalMap = pProcMap->pmap;
+        vm_offset_t AddrR3Dst = AddrR3;
+
+        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
+            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC
+            || pMemToMap->enmType == RTR0MEMOBJTYPE_PAGE)
+        {
+            /* Mapping physical allocations */
+            Assert(cPages == pMemToMapHaiku->u.Phys.cPages);
+
+            /* Insert the memory page by page into the mapping. */
+            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_page_t pPage = pMemToMapHaiku->u.Phys.apPages[iPage];
+
+                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
+                AddrR3Dst += PAGE_SIZE;
+            }
+        }
+        else
+        {
+            /* Mapping cont or low memory types */
+            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
+
+            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
+
+                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
+                AddrR3Dst += PAGE_SIZE;
+                AddrToMap += PAGE_SIZE;
+            }
+        }
+    }
+
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Create a mapping object for it.
+         */
+        PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU),
+                                                                     RTR0MEMOBJTYPE_MAPPING,
+                                                                     (void *)AddrR3,
+                                                                     pMemToMap->cb);
+        if (pMemHaiku)
+        {
+            Assert((vm_offset_t)pMemHaiku->Core.pv == AddrR3);
+            pMemHaiku->Core.u.Mapping.R0Process = R0Process;
+            *ppMem = &pMemHaiku->Core;
+            return VINF_SUCCESS;
+        }
+
+        rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
+        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
+    }
+#endif
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/* Changing protection of an existing mapping is not implemented on Haiku. */
+int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Translate a page index within a memory object to its host physical address.
+ *
+ * @returns The physical address, or NIL_RTHCPHYS on failure.
+ * @param   pMem    The memory object.
+ * @param   iPage   Zero-based page index within the object.
+ */
+RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)pMem;
+    status_t rc;
+
+    /** @todo r=ramshankar: Validate objects */
+
+    LogFlow(("rtR0MemObjNativeGetPagePhysAddr: pMem=%p enmType=%x iPage=%u\n", pMem, pMemHaiku->Core.enmType, (unsigned)iPage));
+
+    switch (pMemHaiku->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_LOCK:
+        {
+            team_id TeamId = B_SYSTEM_TEAM;
+            physical_entry aPhysMap[2];
+            int32 cPhysMap = RT_ELEMENTS(aPhysMap);
+
+            if (pMemHaiku->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+                TeamId = (team_id)pMemHaiku->Core.u.Lock.R0Process;
+            /* Byte-based pointer arithmetic: arithmetic on void * is a GCC
+               extension, so cast to uint8_t * first. */
+            void *pb = (uint8_t *)pMemHaiku->Core.pv + (iPage << PAGE_SHIFT);
+
+            rc = get_memory_map_etc(TeamId, pb, B_PAGE_SIZE, aPhysMap, &cPhysMap);
+            if (rc < B_OK || cPhysMap < 1)
+                return NIL_RTHCPHYS;
+
+            return aPhysMap[0].address;
+        }
+
+#if 0
+        case RTR0MEMOBJTYPE_MAPPING:
+        {
+            vm_offset_t pb = (vm_offset_t)pMemHaiku->Core.pv + (iPage << PAGE_SHIFT);
+
+            if (pMemHaiku->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
+            {
+                struct proc *pProc = (struct proc *)pMemHaiku->Core.u.Mapping.R0Process;
+                struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
+                pmap_t pPhysicalMap = pProcMap->pmap;
+
+                return pmap_extract(pPhysicalMap, pb);
+            }
+            return vtophys(pb);
+        }
+#endif
+        case RTR0MEMOBJTYPE_CONT:
+            /* Contiguous: plain offset from the recorded physical base. */
+            return pMemHaiku->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
+
+        case RTR0MEMOBJTYPE_PHYS:
+            return pMemHaiku->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
+
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            team_id TeamId = B_SYSTEM_TEAM;
+            physical_entry aPhysMap[2];
+            int32 cPhysMap = RT_ELEMENTS(aPhysMap);
+
+            void *pb = (uint8_t *)pMemHaiku->Core.pv + (iPage << PAGE_SHIFT);
+            rc = get_memory_map_etc(TeamId, pb, B_PAGE_SIZE, aPhysMap, &cPhysMap);
+            if (rc < B_OK || cPhysMap < 1)
+                return NIL_RTHCPHYS;
+
+            return aPhysMap[0].address;
+        }
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+        default:
+            return NIL_RTHCPHYS;
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/mp-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/mp-r0drv-haiku.c
new file mode 100644
index 00000000..7d0aa5cc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/mp-r0drv-haiku.c
@@ -0,0 +1,236 @@
+/* $Id: mp-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+
+#include <iprt/mp.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/cpuset.h>
+#include "r0drv/mp-r0drv.h"
+
+
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+    /* Haiku CPU numbers map 1:1 to IPRT CPU ids here (see RTMpCpuIdToSetIndex). */
+    return smp_get_current_cpu();
+}
+
+
+RTDECL(int) RTMpCurSetIndex(void)
+{
+    /* Set index == CPU id in this implementation. */
+    return smp_get_current_cpu();
+}
+
+
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+    /* Store the current CPU id and return it as the set index (they coincide). */
+    return *pidCpu = smp_get_current_cpu();
+}
+
+
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+    /* Identity mapping; -1 for out-of-range ids. */
+    return idCpu < smp_get_num_cpus() ? (int)idCpu : -1;
+}
+
+
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+    /* Unsigned cast also rejects negative indexes in one comparison. */
+    return (unsigned)iCpu < smp_get_num_cpus() ? (RTCPUID)iCpu : NIL_RTCPUID;
+}
+
+
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+    /* Ids are contiguous from 0, so the highest id is count - 1. */
+    return smp_get_num_cpus() - 1;
+}
+
+
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+    /* Any id below the CPU count is considered possible. */
+    return idCpu < smp_get_num_cpus();
+}
+
+
+/**
+ * Fill pSet with every CPU that may exist on this system.
+ *
+ * @returns pSet.
+ * @param   pSet    The CPU set to populate.
+ */
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+    RTCPUID idCur;
+    RTCPUID idLast = RTMpGetMaxCpuId();
+
+    RTCpuSetEmpty(pSet);
+    for (idCur = 0; idCur <= idLast; idCur++)
+        if (RTMpIsCpuPossible(idCur))
+            RTCpuSetAdd(pSet, idCur);
+    return pSet;
+}
+
+
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+    return smp_get_num_cpus();
+}
+
+
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+    /* Assumes every possible CPU is online (no hot-unplug handling). */
+    return idCpu < smp_get_num_cpus();
+    /** @todo FixMe && !CPU_ABSENT(idCpu) */
+}
+
+
+/**
+ * Fill pSet with every online CPU.
+ *
+ * @returns pSet.
+ * @param   pSet    The CPU set to populate.
+ */
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+    RTCPUID idCur;
+    RTCPUID idLast = RTMpGetMaxCpuId();
+
+    RTCpuSetEmpty(pSet);
+    for (idCur = 0; idCur <= idLast; idCur++)
+        if (RTMpIsCpuOnline(idCur))
+            RTCpuSetAdd(pSet, idCur);
+
+    return pSet;
+}
+
+
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+    /* Assumes all CPUs are online; matches RTMpIsCpuOnline above. */
+    return smp_get_num_cpus();
+}
+
+
+/**
+ * Wrapper between the native Haiku per-cpu callback and PFNRTWORKER
+ * for the RTMpOnAll API.
+ *
+ * @param   pvArg       Pointer to the RTMPARGS package.
+ * @param   current     The CPU this callback is executing on (Haiku supplies it).
+ */
+static void rtmpOnAllHaikuWrapper(void *pvArg, int current)
+{
+    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+    pArgs->pfnWorker(current, pArgs->pvUser1, pArgs->pvUser2);
+}
+
+
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    RTMPARGS Args;
+    Args.pfnWorker = pfnWorker;
+    Args.pvUser1 = pvUser1;
+    Args.pvUser2 = pvUser2;
+    Args.idCpu = NIL_RTCPUID;   /* no specific target: run everywhere */
+    Args.cHits = 0;
+    /* is _sync needed ? */
+    call_all_cpus_sync(rtmpOnAllHaikuWrapper, &Args);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native Haiku per-cpu callback and PFNRTWORKER
+ * for the RTMpOnOthers API.
+ *
+ * @param   pvArg       Pointer to the RTMPARGS package.
+ * @param   current     The CPU this callback is executing on.
+ */
+static void rtmpOnOthersHaikuWrapper(void *pvArg, int current)
+{
+    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+    RTCPUID idCpu = current;
+    /* Skip the caller's CPU (stored in idCpu by RTMpOnOthers). */
+    if (pArgs->idCpu != idCpu)
+        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+}
+
+
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    /* Will panic if no rendezvousing cpus, so check up front. */
+    if (RTMpGetOnlineCount() > 1)
+    {
+        RTMPARGS Args;
+
+        Args.pfnWorker = pfnWorker;
+        Args.pvUser1 = pvUser1;
+        Args.pvUser2 = pvUser2;
+        Args.idCpu = RTMpCpuId();   /* the wrapper skips this CPU */
+        Args.cHits = 0;
+        /* is _sync needed ? */
+        call_all_cpus_sync(rtmpOnOthersHaikuWrapper, &Args);
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native Haiku per-cpu callback and PFNRTWORKER
+ * for the RTMpOnSpecific API.
+ *
+ * @param   pvArg       Pointer to the RTMPARGS package.
+ * @param   current     The CPU this callback is executing on.
+ */
+static void rtmpOnSpecificHaikuWrapper(void *pvArg, int current)
+{
+    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
+    RTCPUID idCpu = current;
+    /* Only run on the requested CPU; cHits lets the caller verify it ran. */
+    if (pArgs->idCpu == idCpu)
+    {
+        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+        ASMAtomicIncU32(&pArgs->cHits);
+    }
+}
+
+
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    RTMPARGS Args;
+
+    /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */
+    if (!RTMpIsCpuOnline(idCpu))
+        return VERR_CPU_NOT_FOUND;
+
+    Args.pfnWorker = pfnWorker;
+    Args.pvUser1 = pvUser1;
+    Args.pvUser2 = pvUser2;
+    Args.idCpu = idCpu;
+    Args.cHits = 0;
+    /* is _sync needed ? */
+    call_all_cpus_sync(rtmpOnSpecificHaikuWrapper, &Args);
+    /* Exactly one hit means the worker ran on the requested CPU. */
+    return Args.cHits == 1
+         ? VINF_SUCCESS
+         : VERR_CPU_NOT_FOUND;
+}
+
+
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+    /* call_all_cpus_sync runs the worker on all CPUs concurrently. */
+    return true;
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/process-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/process-r0drv-haiku.c
new file mode 100644
index 00000000..4c7ecbe4
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/process-r0drv-haiku.c
@@ -0,0 +1,46 @@
+/* $Id: process-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Process, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/process.h>
+
+
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+    /* In the Haiku kernel, getpid() yields the current team id. */
+    return getpid();
+}
+
+
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+    /* The ring-0 process handle is the team id widened to RTR0PROCESS. */
+    return (RTR0PROCESS)(team_id)getpid();
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/semevent-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/semevent-r0drv-haiku.c
new file mode 100644
index 00000000..2af59535
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/semevent-r0drv-haiku.c
@@ -0,0 +1,264 @@
+/* $Id: semevent-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Haiku event semaphore.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** The semaphore Id. */
+ sem_id SemId;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+/* Convenience wrapper: default flags, no lock-validator class, no name. */
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+ return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/* Creates a single-release event semaphore. hClass and pszNameFmt are not used
+   on Haiku (no lock validator support here). */
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+ AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAllocZ(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->cRefs = 1;
+ /* Initial count 0: the event starts in the non-signalled state. */
+ pThis->SemId = create_sem(0, "IPRT Semaphore Event");
+ if (pThis->SemId >= B_OK)
+ {
+ /* Hand the sem to the system team so it survives the creating team. */
+ set_sem_owner(pThis->SemId, B_SYSTEM_TEAM);
+ *phEventSem = pThis;
+ return VINF_SUCCESS;
+ }
+
+ RTMemFree(pThis);
+ return VERR_TOO_MANY_SEMAPHORES; /** @todo r=ramshankar: use RTErrConvertFromHaikuKernReturn */
+}
+
+
+/**
+ * Retains a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventHkuRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ /* Sanity cap against refcount leaks; NOREF keeps release builds warning-free. */
+ Assert(cRefs < 100000); NOREF(cRefs);
+}
+
+
+/**
+ * Releases a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventHkuRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ /* The last reference frees the structure (magic was invalidated by destroy). */
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ RTMemFree(pThis);
+}
+
+
+/* Destroys the event semaphore; a NIL handle is a no-op returning VINF_SUCCESS. */
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = hEventSem;
+ if (pThis == NIL_RTSEMEVENT)
+ return VINF_SUCCESS;
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs > 0);
+
+ /*
+ * Invalidate it and delete the semaphore to unblock everyone.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
+ /* delete_sem wakes blocked waiters; they see B_BAD_SEM_ID and return
+    VERR_SEM_DESTROYED (see rtR0SemEventWait), and the refcount keeps the
+    structure alive until the last of them drops its reference. */
+ delete_sem(pThis->SemId);
+ pThis->SemId = -1;
+ rtR0SemEventHkuRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ /* The reference keeps pThis alive if the semaphore is destroyed concurrently. */
+ rtR0SemEventHkuRetain(pThis);
+
+ /*
+ * Signal the event object.
+ * We must use B_DO_NOT_RESCHEDULE since we are being used from an irq handler.
+ */
+ release_sem_etc(pThis->SemId, 1, B_DO_NOT_RESCHEDULE);
+ rtR0SemEventHkuRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ /* pSrcPos feeds the lock validator only, which is not wired up on Haiku. */
+ status_t status;
+ int rc;
+ int32 flags = 0;
+ bigtime_t timeout; /* in microseconds */
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+
+ if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+ timeout = B_INFINITE_TIMEOUT;
+ else
+ {
+ /* Nanosecond timeouts round down to the kernel's microsecond granularity. */
+ if (fFlags & RTSEMWAIT_FLAGS_NANOSECS)
+ timeout = uTimeout / 1000;
+ else if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+ timeout = uTimeout * 1000;
+ else
+ return VERR_INVALID_PARAMETER;
+
+ if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+ flags |= B_RELATIVE_TIMEOUT;
+ else if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
+ flags |= B_ABSOLUTE_TIMEOUT;
+ else
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
+ flags |= B_CAN_INTERRUPT;
+ // likely not:
+ //else
+ // flags |= B_KILL_CAN_INTERRUPT;
+
+ /* Hold a reference across the blocking call: destroy may run concurrently
+    and delete_sem will wake us with B_BAD_SEM_ID below. */
+ rtR0SemEventHkuRetain(pThis);
+
+ status = acquire_sem_etc(pThis->SemId, 1, flags, timeout);
+
+ switch (status)
+ {
+ case B_OK:
+ rc = VINF_SUCCESS;
+ break;
+ case B_BAD_SEM_ID:
+ rc = VERR_SEM_DESTROYED;
+ break;
+ case B_INTERRUPTED:
+ rc = VERR_INTERRUPTED;
+ break;
+ case B_WOULD_BLOCK:
+ /* fallthrough ? */
+ case B_TIMED_OUT:
+ rc = VERR_TIMEOUT;
+ break;
+ default:
+ rc = RTErrConvertFromHaikuKernReturn(status);
+ break;
+ }
+
+ rtR0SemEventHkuRelease(pThis);
+ return rc;
+}
+
+
+/* Strict builds capture the caller's source position for the lock validator;
+   otherwise the worker runs without position info. */
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitEx);
+
+
+/* Debug variant: always forwards the caller-supplied source position. */
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitExDebug);
+
+
+/* Timeout resolution in nanoseconds: acquire_sem_etc takes microseconds. */
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+ /* At least that's what the API supports. */
+ return 1000;
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/semeventmulti-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/semeventmulti-r0drv-haiku.c
new file mode 100644
index 00000000..b39ed463
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/semeventmulti-r0drv-haiku.c
@@ -0,0 +1,292 @@
+/* $Id: semeventmulti-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/lockvalidator.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Haiku multiple release event semaphore.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+ /** Magic value (RTSEMEVENTMULTI_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** The semaphore Id. */
+ sem_id SemId;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+/* Convenience wrapper: default flags, no lock-validator class, no name. */
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+ return RTSemEventMultiCreateEx(phEventMultiSem, 0 /* fFlags */, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates a multiple-release event semaphore.
+ *
+ * hClass and pszNameFmt are not used on Haiku (no lock validator support).
+ * Returns VINF_SUCCESS, VERR_INVALID_PARAMETER/POINTER, VERR_NO_MEMORY, or
+ * VERR_TOO_MANY_SEMAPHORES when create_sem() fails.
+ */
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+                                    const char *pszNameFmt, ...)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis;
+
+    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+    /* Consistency with semevent-r0drv-haiku.c: validate the output pointer. */
+    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
+    pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic = RTSEMEVENTMULTI_MAGIC;
+    pThis->cRefs    = 1;
+    pThis->SemId    = create_sem(0, "IPRT Semaphore Event Multi");
+    /* Fixed: create_sem() returns a non-negative sem_id on success; the old
+       '< B_OK' test took the success path on FAILURE and vice versa. */
+    if (pThis->SemId >= B_OK)
+    {
+        /* Hand the sem to the system team so it survives the creating team. */
+        set_sem_owner(pThis->SemId, B_SYSTEM_TEAM);
+        *phEventMultiSem = pThis;
+        return VINF_SUCCESS;
+    }
+
+    RTMemFree(pThis);
+    return VERR_TOO_MANY_SEMAPHORES; /** @todo r=ramshankar: use RTErrConvertFromHaikuKernReturn */
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiHkuRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+    /* Sanity cap against refcount leaks. NOREF added: Assert() compiles out in
+       release builds, which left cRefs unused (matches semevent sibling). */
+    Assert(cRefs && cRefs < 100000); NOREF(cRefs);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiHkuRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ /* The last reference frees the structure; by then the magic must have been
+    invalidated by RTSemEventMultiDestroy. */
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ {
+ Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC);
+ RTMemFree(pThis);
+ }
+}
+
+
+/* Destroys the semaphore; a NIL handle is a no-op returning VINF_SUCCESS. */
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (pThis == NIL_RTSEMEVENTMULTI)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ Assert(pThis->cRefs > 0);
+
+ /*
+ * Invalidate it and signal the object just in case.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC);
+ /* delete_sem wakes all waiters (they see B_BAD_SEM_ID -> VERR_SEM_DESTROYED);
+    the refcount keeps the struct alive until the last waiter lets go. */
+ delete_sem(pThis->SemId);
+ pThis->SemId = -1;
+ rtR0SemEventMultiHkuRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ /* The reference keeps pThis alive if the semaphore is destroyed concurrently. */
+ rtR0SemEventMultiHkuRetain(pThis);
+
+ /*
+ * Signal the event object.
+ * We must use B_DO_NOT_RESCHEDULE since we are being used from an irq handler.
+ */
+ release_sem_etc(pThis->SemId, 1, B_RELEASE_ALL | B_DO_NOT_RESCHEDULE);
+ rtR0SemEventMultiHkuRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiHkuRetain(pThis);
+
+ /*
+ * Reset it.
+ */
+ /* NOTE(review): reset is currently a no-op. Draining the count with
+    acquire_sem_etc(SemId, 1, B_RELATIVE_TIMEOUT, 0) in a loop might work —
+    TODO confirm against Haiku semaphore semantics before implementing. */
+ //FIXME: what should I do ???
+ // delete_sem + create_sem ??
+ rtR0SemEventMultiHkuRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventMultiWaitEx.
+ * @param uTimeout See RTSemEventMultiWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventMultiHkuWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ /* pSrcPos feeds the lock validator only, which is not wired up on Haiku. */
+ status_t status;
+ int rc;
+ int32 flags = 0;
+ bigtime_t timeout; /* in microseconds */
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+
+ if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+ timeout = B_INFINITE_TIMEOUT;
+ else
+ {
+ /* Nanosecond timeouts round down to the kernel's microsecond granularity. */
+ if (fFlags & RTSEMWAIT_FLAGS_NANOSECS)
+ timeout = uTimeout / 1000;
+ else if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+ timeout = uTimeout * 1000;
+ else
+ return VERR_INVALID_PARAMETER;
+
+ if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+ flags |= B_RELATIVE_TIMEOUT;
+ else if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
+ flags |= B_ABSOLUTE_TIMEOUT;
+ else
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
+ flags |= B_CAN_INTERRUPT;
+ // likely not:
+ //else
+ // flags |= B_KILL_CAN_INTERRUPT;
+
+ /* Hold a reference across the blocking call: a concurrent destroy deletes
+    the sem and wakes us with B_BAD_SEM_ID below. */
+ rtR0SemEventMultiHkuRetain(pThis);
+
+ status = acquire_sem_etc(pThis->SemId, 1, flags, timeout);
+
+ switch (status)
+ {
+ case B_OK:
+ rc = VINF_SUCCESS;
+ break;
+ case B_BAD_SEM_ID:
+ rc = VERR_SEM_DESTROYED;
+ break;
+ case B_INTERRUPTED:
+ rc = VERR_INTERRUPTED;
+ break;
+ case B_WOULD_BLOCK:
+ /* fallthrough? */
+ case B_TIMED_OUT:
+ rc = VERR_TIMEOUT;
+ break;
+ default:
+ rc = RTErrConvertFromHaikuKernReturn(status);
+ break;
+ }
+
+ rtR0SemEventMultiHkuRelease(pThis);
+ return rc;
+}
+
+
+/* Strict builds capture the caller's source position for the lock validator. */
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventMultiHkuWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventMultiHkuWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitEx);
+
+
+/* Debug variant: always forwards the caller-supplied source position. */
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventMultiHkuWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitExDebug);
+
+
+/* Timeout resolution in nanoseconds: acquire_sem_etc takes microseconds. */
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+ /* At least that's what the API supports. */
+ return 1000;
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiGetResolution);
+
diff --git a/src/VBox/Runtime/r0drv/haiku/semfastmutex-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/semfastmutex-r0drv-haiku.c
new file mode 100644
index 00000000..dc68d1b7
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/semfastmutex-r0drv-haiku.c
@@ -0,0 +1,120 @@
+/* $Id: semfastmutex-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/err.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the Haiku (sleep) mutex.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+ /** Magic value (RTSEMFASTMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** A good old Benaphore. */
+ vint32 BenId;
+ sem_id SemId;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+/* Creates a benaphore-based fast mutex. The sem starts at count 0 on purpose:
+   it only parks CONTENDED waiters, the uncontended path is the BenId atomic. */
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+ AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+ AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+
+ PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
+ if (RT_UNLIKELY(!pThis))
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+ pThis->BenId = 0;
+ pThis->SemId = create_sem(0, "IPRT Fast Mutex Semaphore");
+ if (pThis->SemId >= B_OK)
+ {
+ *phFastMtx = pThis;
+ return VINF_SUCCESS;
+ }
+ RTMemFree(pThis);
+ return VERR_TOO_MANY_SEMAPHORES; /** @todo r=ramshankar: use RTErrConvertFromHaikuKernReturn */
+}
+
+
+/* Destroys the fast mutex; NIL is a no-op. NOTE(review): the struct is freed
+   immediately, so destroying a contended mutex would be a use-after-free for
+   the parked waiters — callers must ensure it is idle, verify this contract. */
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ if (pThis == NIL_RTSEMFASTMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+ delete_sem(pThis->SemId);
+ RTMemFree(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ /* Benaphore: atomic_add returns the PREVIOUS value; > 0 means the lock was
+    already held, so park on the kernel semaphore until released. */
+ if (atomic_add(&pThis->BenId, 1) > 0)
+ acquire_sem(pThis->SemId);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ /* Previous value > 1 means at least one thread is parked: wake one. */
+ if (atomic_add(&pThis->BenId, -1) > 1)
+ release_sem(pThis->SemId);
+
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/semmutex-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/semmutex-r0drv-haiku.c
new file mode 100644
index 00000000..2e910781
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/semmutex-r0drv-haiku.c
@@ -0,0 +1,233 @@
+/* $Id: semmutex-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the Haiku (sleep) mutex.
+ */
+/* XXX: not optimal, maybe should use the (private)
+ kernel recursive_lock ? (but it's not waitable) */
+typedef struct RTSEMMUTEXINTERNAL
+{
+ /** Magic value (RTSEMMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** Kernel semaphore. */
+ sem_id SemId;
+ /** Current holder */
+ volatile thread_id OwnerId;
+ /** Recursion count */
+ int32 cRecursion;
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+/**
+ * Creates a recursive mutex backed by a Haiku kernel semaphore.
+ *
+ * Returns VINF_SUCCESS, VERR_INVALID_POINTER, VERR_NO_MEMORY, or
+ * VERR_TOO_MANY_SEMAPHORES when create_sem() fails.
+ */
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMutexSem)
+{
+    AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
+    AssertPtrReturn(phMutexSem, VERR_INVALID_POINTER);
+
+    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
+    if (RT_UNLIKELY(!pThis))
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic = RTSEMMUTEX_MAGIC;
+    /* Fixed: a mutex must start unlocked, i.e. with count 1 — with the old
+       create_sem(0, ...) the very first acquire_sem_etc() in
+       rtSemMutexRequestEx() would block forever. */
+    pThis->SemId = create_sem(1, "IPRT Mutex Semaphore");
+    /* Fixed: success is a non-negative sem_id; the old '< B_OK' test returned
+       success on create_sem() FAILURE and leaked/failed on success. */
+    if (pThis->SemId >= B_OK)
+    {
+        pThis->OwnerId    = -1;
+        pThis->cRecursion = 0;
+        *phMutexSem = pThis;
+        return VINF_SUCCESS;
+    }
+    RTMemFree(pThis);
+    return VERR_TOO_MANY_SEMAPHORES; /** @todo r=ramshankar: use RTErrConvertFromHaikuKernReturn */
+}
+
+
+/* Destroys the mutex; NIL is a no-op. The CmpXchg makes concurrent destroy
+   race-safe on the magic; delete_sem wakes blocked requesters (B_BAD_SEM_ID).
+   NOTE(review): the struct is freed immediately — destroying a mutex that
+   still has waiters would be a use-after-free; verify the caller contract. */
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ if (pThis == NIL_RTSEMMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
+
+ delete_sem(pThis->SemId);
+ RTMemFree(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker function for acquiring the mutex.
+ *
+ * @param hMutexSem The mutex object.
+ * @param fFlags Mutex flags (see RTSEMWAIT_FLAGS_*)
+ * @param uTimeout Timeout in units specified by the flags.
+ *
+ * @return IPRT status code.
+ */
+static int rtSemMutexRequestEx(RTSEMMUTEX hMutexSem, uint32_t fFlags, uint64_t uTimeout)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    int rc;
+    status_t status;
+    int32 flags = 0;
+    bigtime_t timeout; /* in microseconds */
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    /* Recursive entry: only the owning thread can ever see its own id here,
+       so this unlocked read is safe for the owner. */
+    if (pThis->OwnerId == find_thread(NULL))
+    {
+        /* Fixed: bump the recursion counter — the old code did
+           'pThis->OwnerId++', corrupting the owner thread id so the owner
+           could no longer release and a neighbouring thread id could
+           spuriously "own" the mutex. */
+        pThis->cRecursion++;
+        return VINF_SUCCESS;
+    }
+
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+        timeout = B_INFINITE_TIMEOUT;
+    else
+    {
+        /* Nanoseconds round down to the kernel's microsecond granularity. */
+        if (fFlags & RTSEMWAIT_FLAGS_NANOSECS)
+            timeout = uTimeout / 1000;
+        else if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+            timeout = uTimeout * 1000;
+        else
+            return VERR_INVALID_PARAMETER;
+
+        if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+            flags |= B_RELATIVE_TIMEOUT;
+        else if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
+            flags |= B_ABSOLUTE_TIMEOUT;
+        else
+            return VERR_INVALID_PARAMETER;
+    }
+
+    if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
+        flags |= B_CAN_INTERRUPT;
+
+    status = acquire_sem_etc(pThis->SemId, 1, flags, timeout);
+
+    switch (status)
+    {
+        case B_OK:
+            rc = VINF_SUCCESS;
+            /* We now hold the semaphore exclusively: record ownership. */
+            pThis->cRecursion = 1;
+            pThis->OwnerId = find_thread(NULL);
+            break;
+        case B_BAD_SEM_ID:
+            rc = VERR_SEM_DESTROYED;
+            break;
+        case B_INTERRUPTED:
+            rc = VERR_INTERRUPTED;
+            break;
+        case B_WOULD_BLOCK:
+            /* fallthrough? */
+        case B_TIMED_OUT:
+            rc = VERR_TIMEOUT;
+            break;
+        default:
+            rc = VERR_INVALID_PARAMETER;
+            break;
+    }
+
+    return rc;
+}
+
+
+/* Relative millisecond wait, resuming after signals (RESUME). */
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexRequestEx(hMutexSem, RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_MILLISECS, cMillies);
+}
+
+
+/* Debug variant; the source position is unused on Haiku (no lock validator). */
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+/* Relative millisecond wait that returns VERR_INTERRUPTED on signals (NORESUME). */
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexRequestEx(hMutexSem, RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_MILLISECS, cMillies);
+}
+
+
+/* Debug variant; the source position is unused on Haiku (no lock validator). */
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+/**
+ * Releases one recursion level; the semaphore itself is only released when the
+ * recursion count reaches zero. Only the owning thread may release.
+ */
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
+{
+    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+    if (pThis->OwnerId != find_thread(NULL))
+        return VERR_INVALID_HANDLE; /** @todo VERR_NOT_OWNER would be more accurate. */
+
+    if (--pThis->cRecursion == 0)
+    {
+        /* Fixed: this was 'pThis->OwnerId == -1;' — a no-op comparison, so the
+           mutex stayed marked as owned forever and RTSemMutexIsOwned always
+           returned true after the first acquisition. */
+        pThis->OwnerId = -1;
+        release_sem(pThis->SemId);
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/* Returns true when ANY thread owns the mutex, not just the caller. */
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ AssertPtrReturn(pThis, false);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), false);
+
+ return pThis->OwnerId != -1;
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/spinlock-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/spinlock-r0drv-haiku.c
new file mode 100644
index 00000000..b1f70f37
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/spinlock-r0drv-haiku.c
@@ -0,0 +1,138 @@
+/* $Id: spinlock-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/spinlock.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+#include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the KSPIN_LOCK type.
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+ /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** Spinlock creation flags */
+ uint32_t fFlags;
+ /** Saved interrupt CPU status. */
+ cpu_status volatile fIntSaved;
+ /** The Haiku spinlock structure. */
+ spinlock hSpinLock;
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+
+/* Creates a spinlock. fFlags is recorded but not honoured by the acquire path
+   (interrupts are always disabled there); pszName is ignored. */
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+ RT_ASSERT_PREEMPTIBLE();
+ NOREF(pszName);
+
+ /*
+ * Allocate.
+ */
+ AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+ PRTSPINLOCKINTERNAL pSpinlockInt = (PRTSPINLOCKINTERNAL)RTMemAllocZ(sizeof(*pSpinlockInt));
+ if (RT_UNLIKELY(!pSpinlockInt))
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize & return.
+ */
+ pSpinlockInt->u32Magic = RTSPINLOCK_MAGIC;
+ pSpinlockInt->fFlags = fFlags;
+ pSpinlockInt->fIntSaved = 0;
+ B_INITIALIZE_SPINLOCK(&pSpinlockInt->hSpinLock);
+
+ *pSpinlock = pSpinlockInt;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+ /*
+ * Validate input.
+ */
+ PRTSPINLOCKINTERNAL pSpinlockInt = (PRTSPINLOCKINTERNAL)Spinlock;
+ if (RT_UNLIKELY(!pSpinlockInt))
+ return VERR_INVALID_PARAMETER;
+ AssertMsgReturn(pSpinlockInt->u32Magic == RTSPINLOCK_MAGIC,
+ ("Invalid spinlock %p magic=%#x\n", pSpinlockInt, pSpinlockInt->u32Magic),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Make the lock invalid and release the memory.
+ */
+ /* Bumping the magic invalidates the handle for late users of the pointer. */
+ ASMAtomicIncU32(&pSpinlockInt->u32Magic);
+
+ /* Re-initialize (i.e. clear) the lock word before freeing. */
+ B_INITIALIZE_SPINLOCK(&pSpinlockInt->hSpinLock);
+
+ RTMemFree(pSpinlockInt);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Acquires the spinlock with interrupts disabled; the saved interrupt state is
+ * kept in the lock structure for RTSpinlockRelease().
+ */
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pSpinlockInt = (PRTSPINLOCKINTERNAL)Spinlock;
+    AssertPtr(pSpinlockInt);
+    Assert(pSpinlockInt->u32Magic == RTSPINLOCK_MAGIC);
+
+    /* Haiku cannot take spinlocks without disabling interrupts. Ignore our spinlock creation flags. */
+    /* Fixed: save the interrupt state in a local and only store it into the
+       shared structure once the lock is actually held. Writing fIntSaved
+       before acquire_spinlock() let a losing contender overwrite the
+       winner's saved state, restoring the wrong interrupt state later. */
+    cpu_status fIntSaved = disable_interrupts();
+    acquire_spinlock(&pSpinlockInt->hSpinLock);
+    pSpinlockInt->fIntSaved = fIntSaved;
+}
+
+
+/**
+ * Releases the spinlock and restores the interrupt state saved at acquire.
+ */
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pSpinlockInt = (PRTSPINLOCKINTERNAL)Spinlock;
+    AssertPtr(pSpinlockInt);
+    Assert(pSpinlockInt->u32Magic == RTSPINLOCK_MAGIC);
+
+    /* Fixed: snapshot fIntSaved while we still own the lock. Reading it after
+       release_spinlock() raced with the next acquirer overwriting it in
+       RTSpinlockAcquire(), restoring the wrong interrupt state. */
+    cpu_status fIntSaved = pSpinlockInt->fIntSaved;
+    release_spinlock(&pSpinlockInt->hSpinLock);
+    restore_interrupts(fIntSaved);
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/the-haiku-kernel.h b/src/VBox/Runtime/r0drv/haiku/the-haiku-kernel.h
new file mode 100644
index 00000000..68307486
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/the-haiku-kernel.h
@@ -0,0 +1,116 @@
+/* $Id: the-haiku-kernel.h $ */
+/** @file
+ * IPRT - Include all necessary headers for the Haiku kernel.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_haiku_the_haiku_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_haiku_the_haiku_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+#include <stdlib.h>
+
+#include <OS.h>
+#include <KernelExport.h>
+
+#include <iprt/cdefs.h>
+#include <iprt/err.h>
+#include <iprt/types.h>
+
+RT_C_DECLS_BEGIN
+
+/* headers/private/kernel/smp.h */
+
+extern int32 smp_get_num_cpus(void);
+extern int32 smp_get_current_cpu(void);
+
+/* headers/private/kernel/vm/vm.h */
+extern status_t vm_unreserve_address_range(team_id team, void *address, addr_t size);
+extern status_t vm_reserve_address_range(team_id team, void **_address, uint32 addressSpec, addr_t size, uint32 flags);
+extern area_id vm_clone_area(team_id team, const char *name, void **address, uint32 addressSpec, uint32 protection,
+ uint32 mapping, area_id sourceArea, bool kernel);
+
+/* headers/private/kernel/thread_type.h */
+
+extern spinlock gThreadSpinlock;
+#define GRAB_THREAD_LOCK() acquire_spinlock(&gThreadSpinlock)
+#define RELEASE_THREAD_LOCK() release_spinlock(&gThreadSpinlock)
+typedef struct
+{
+ int32 flags; /* summary of events relevant in interrupt handlers (signals pending, user debugging
+ enabled, etc.) */
+#if 0
+ Thread *all_next;
+ Thread *team_next;
+ Thread *queue_next; /* i.e. run queue, release queue, etc. */
+ timer alarm;
+ thread_id id;
+ char name[B_OS_NAME_LENGTH];
+ int32 priority;
+ int32 next_priority;
+ int32 io_priority;
+ int32 state;
+ int32 next_state;
+#endif
+ // and a lot more...
+} Thread;
+
+/* headers/private/kernel/thread.h */
+
+extern Thread* thread_get_thread_struct(thread_id id);
+extern Thread* thread_get_thread_struct_locked(thread_id id);
+
+extern void thread_yield(bool force);
+
+RT_C_DECLS_END
+
+/**
+ * Convert from Haiku kernel return code to IPRT status code.
+ *
+ * Explicitly maps the semaphore/thread/team error codes; everything else is
+ * funneled through RTErrConvertFromErrno since Haiku defines its POSIX error
+ * numbers as a subset of the system error codes.
+ *
+ * @returns IPRT status code.
+ * @param   rc      The Haiku status_t value to convert (B_OK == 0).
+ *
+ * @todo put this where it belongs! (i.e. in a separate file and prototype in iprt/err.h)
+ * Or as generic call since it's not r0 specific.
+ */
+DECLINLINE(int) RTErrConvertFromHaikuKernReturn(status_t rc)
+{
+    switch (rc)
+    {
+        case B_OK:              return VINF_SUCCESS;
+        case B_BAD_SEM_ID:      return VERR_SEM_ERROR;
+        case B_NO_MORE_SEMS:    return VERR_TOO_MANY_SEMAPHORES;
+        case B_BAD_THREAD_ID:   return VERR_INVALID_PARAMETER;
+        case B_NO_MORE_THREADS: return VERR_MAX_THRDS_REACHED;
+        case B_BAD_TEAM_ID:     return VERR_INVALID_PARAMETER;
+        case B_NO_MORE_TEAMS:   return VERR_MAX_PROCS_REACHED;
+        //default: return VERR_GENERAL_FAILURE;
+        /** POSIX Errors are defined as a subset of system errors. */
+        default:                return RTErrConvertFromErrno(rc);
+    }
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_haiku_the_haiku_kernel_h */
+
diff --git a/src/VBox/Runtime/r0drv/haiku/thread-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/thread-r0drv-haiku.c
new file mode 100644
index 00000000..a1c7aef5
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/thread-r0drv-haiku.c
@@ -0,0 +1,127 @@
+/* $Id: thread-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Threads, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+#include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+
+
+/** Returns the native (Haiku) thread id of the calling thread;
+ *  find_thread(NULL) yields the caller's own id. */
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+    return (RTNATIVETHREAD)find_thread(NULL);
+}
+
+
+/** Puts the calling thread to sleep for at least cMillies milliseconds.
+ *  snooze() takes microseconds, hence the * 1000. Always succeeds. */
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    snooze((bigtime_t)cMillies * 1000);
+    return VINF_SUCCESS;
+}
+
+
+/** Yields the CPU to another runnable thread.
+ *  NOTE(review): the return value is supposed to indicate whether we actually
+ *  yielded; thread_yield() gives no such feedback, so true is a guess. */
+RTDECL(bool) RTThreadYield(void)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    //FIXME
+    //snooze(0);
+    thread_yield(true);
+    return true; /* this is fishy */
+}
+
+
+/** Checks whether preemption is currently enabled for the calling thread.
+ *  Approximated by the interrupt flag, since RTThreadPreemptDisable below
+ *  disables interrupts. hThread must be NIL_RTTHREAD (current thread only). */
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD);
+
+    //XXX: can't do this, it might actually be held by another cpu
+    //return !B_SPINLOCK_IS_LOCKED(&gThreadSpinlock);
+    return ASMIntAreEnabled(); /** @todo find a better way. */
+}
+
+
+/** Checks if preemption of the calling thread is pending. Not implemented;
+ *  always reports false. */
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD);
+    /** @todo check if Thread::next_priority or
+     *        cpu_ent::invoke_scheduler could do. */
+    return false;
+}
+
+
+/** Whether RTThreadPreemptIsPending can be trusted on this platform. */
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+    /* RTThreadPreemptIsPending is not reliable yet. */
+    return false;
+}
+
+
+/** Whether the scheduler can preempt kernel threads at all. */
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+    /* yes, kernel preemption is possible. */
+    return true;
+}
+
+
+/** Disables preemption by disabling interrupts, stashing the previous
+ *  cpu_status in pState for the matching RTThreadPreemptRestore call.
+ *  pState->uOldCpuState must be 0 on entry (no nesting of the same state). */
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    Assert(pState->uOldCpuState == 0);
+
+    pState->uOldCpuState = (uint32_t)disable_interrupts();
+    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+}
+
+
+/** Restores the preemption (interrupt) state saved by RTThreadPreemptDisable
+ *  and resets pState so it can be reused. */
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+
+    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+    restore_interrupts((cpu_status)pState->uOldCpuState);
+    pState->uOldCpuState = 0;
+}
+
+
+/** Checks if we're in interrupt context. Approximated by the interrupt flag
+ *  (interrupts are off while servicing one); see the todo below. */
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+    /** @todo Implement RTThreadIsInInterrupt. Required for guest
+     *        additions! */
+    return !ASMIntAreEnabled();
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/thread2-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/thread2-r0drv-haiku.c
new file mode 100644
index 00000000..14fe6097
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/thread2-r0drv-haiku.c
@@ -0,0 +1,138 @@
+/* $Id: thread2-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+#include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include "internal/thread.h"
+
+
+/** One-time native thread subsystem init. Nothing to do on Haiku since
+ *  there is no TLS slot to allocate in ring-0. */
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+    /* No TLS in Ring-0. :-/ */
+    return VINF_SUCCESS;
+}
+
+
+/** Returns the IPRT thread handle of the caller by looking up the native
+ *  thread id in the thread database (NIL if not an IPRT-managed thread). */
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+    return rtThreadGetByNative((RTNATIVETHREAD)find_thread(NULL));
+}
+
+
+/**
+ * Applies an IPRT thread-type priority to the native Haiku thread.
+ *
+ * @returns IPRT status code (Haiku status converted), or
+ *          VERR_INVALID_PARAMETER for an unknown thread type.
+ * @param   pThread     The thread; Core.Key holds the native thread_id.
+ * @param   enmType     The IPRT thread type to map to a Haiku priority.
+ */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+    int32 iPriority;
+    status_t status;
+
+    /*
+     * Convert the priority type to native priorities.
+     * (This is quite naive but should be ok.)
+     */
+    switch (enmType)
+    {
+        case RTTHREADTYPE_INFREQUENT_POLLER:    iPriority = B_LOWEST_ACTIVE_PRIORITY; break;
+        case RTTHREADTYPE_EMULATION:            iPriority = B_LOW_PRIORITY; break;
+        case RTTHREADTYPE_DEFAULT:              iPriority = B_NORMAL_PRIORITY; break;
+        case RTTHREADTYPE_MSG_PUMP:             iPriority = B_DISPLAY_PRIORITY; break;
+        case RTTHREADTYPE_IO:                   iPriority = B_URGENT_DISPLAY_PRIORITY; break;
+        case RTTHREADTYPE_TIMER:                iPriority = B_REAL_TIME_DISPLAY_PRIORITY; break;
+        default:
+            AssertMsgFailed(("enmType=%d\n", enmType));
+            return VERR_INVALID_PARAMETER;
+    }
+
+    status = set_thread_priority((thread_id)pThread->Core.Key, iPriority);
+
+    return RTErrConvertFromHaikuKernReturn(status);
+}
+
+
+/** Adopting foreign (non-IPRT-created) threads is not supported on Haiku. */
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/** Best-effort wait for a thread that is terminating, used during shutdown.
+ *  Just sleeps a millisecond to let the native thread finish exiting. */
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+    /** @todo fix RTThreadWait/RTR0Term race on haiku (comment originally said
+     *        freebsd; this file was ported from the FreeBSD version). */
+    RTThreadSleep(1);
+}
+
+
+/** No native per-thread resources to free on Haiku. */
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+}
+
+
+/**
+ * Native kernel thread wrapper function.
+ *
+ * This will forward to rtThreadMain and do termination upon return.
+ *
+ * @returns Thread exit status (Haiku status_t).
+ * @param   pvArg       Pointer to the argument package (PRTTHREADINT).
+ */
+static status_t rtThreadNativeMain(void *pvArg)
+{
+    const thread_id Self = find_thread(NULL);
+    PRTTHREADINT pThread = (PRTTHREADINT)pvArg;
+
+    int rc = rtThreadMain(pThread, (RTNATIVETHREAD)Self, &pThread->szName[0]);
+
+    /* NOTE(review): rc is an IPRT status here, yet it is fed into the
+     * Haiku-status conversion routine on failure; looks questionable but is
+     * kept as-is — verify what callers of the exit status expect. */
+    if (rc < 0)
+        return RTErrConvertFromHaikuKernReturn(rc);
+    return rc;
+}
+
+
+/**
+ * Spawns the native kernel thread that will run rtThreadNativeMain.
+ *
+ * @returns IPRT status code.
+ * @param   pThreadInt      The IPRT thread data; its szName is used as the
+ *                          native thread name and it is passed as thread arg.
+ * @param   pNativeThread   Where to return the native thread id on success.
+ */
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+    thread_id NativeThread;
+    RT_ASSERT_PREEMPTIBLE();
+
+    /* spawn_kernel_thread returns a thread id (>= B_OK) or a negative error. */
+    NativeThread = spawn_kernel_thread(rtThreadNativeMain, pThreadInt->szName, B_NORMAL_PRIORITY, pThreadInt);
+    if (NativeThread >= B_OK)
+    {
+        /* Threads are spawned suspended; start it running. */
+        resume_thread(NativeThread);
+        *pNativeThread = (RTNATIVETHREAD)NativeThread;
+        return VINF_SUCCESS;
+    }
+    return RTErrConvertFromHaikuKernReturn(NativeThread);
+}
+
diff --git a/src/VBox/Runtime/r0drv/haiku/time-r0drv-haiku.c b/src/VBox/Runtime/r0drv/haiku/time-r0drv-haiku.c
new file mode 100644
index 00000000..58091556
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/haiku/time-r0drv-haiku.c
@@ -0,0 +1,79 @@
+/* $Id: time-r0drv-haiku.c $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, Haiku.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_TIME
+#include "the-haiku-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/time.h>
+
+#include <iprt/asm.h>
+
+
+/** System uptime in nanoseconds; system_time() is microseconds since boot,
+ *  scaled up by RT_NS_1US (1000 ns per microsecond). */
+DECLINLINE(uint64_t) rtTimeGetSystemNanoTS(void)
+{
+    return system_time() * RT_NS_1US;
+}
+
+
+/** System uptime in milliseconds. Note: RT_NS_1US (== 1000) is reused here as
+ *  the microseconds-per-millisecond divisor; numerically correct, even though
+ *  the constant's name doesn't match the role. */
+DECLINLINE(uint64_t) rtTimeGetSystemMilliTS(void)
+{
+    return system_time() / RT_NS_1US;
+}
+
+
+/** Monotonic timestamp in nanoseconds (same as the system variant here). */
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+    return rtTimeGetSystemNanoTS();
+}
+
+
+/** Monotonic timestamp in milliseconds (same as the system variant here). */
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+    return rtTimeGetSystemMilliTS();
+}
+
+
+/** System uptime in nanoseconds. */
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+    return rtTimeGetSystemNanoTS();
+}
+
+
+/** System uptime in milliseconds. */
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+    return rtTimeGetSystemMilliTS();
+}
+
+
+/** Current wall-clock time: real_time_clock_usecs() (microseconds since the
+ *  epoch) converted to nanoseconds for RTTimeSpecSetNano. Returns pTime. */
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+    return RTTimeSpecSetNano(pTime, real_time_clock_usecs() * RT_NS_1US);
+}
+
diff --git a/src/VBox/Runtime/r0drv/initterm-r0drv.cpp b/src/VBox/Runtime/r0drv/initterm-r0drv.cpp
new file mode 100644
index 00000000..894edeff
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/initterm-r0drv.cpp
@@ -0,0 +1,164 @@
+/* $Id: initterm-r0drv.cpp $ */
+/** @file
+ * IPRT - Initialization & Termination, R0 Driver, Common.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/initterm.h>
+#include "internal/iprt.h"
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#ifndef IN_GUEST /* play safe for now */
+# include "r0drv/mp-r0drv.h"
+# include "r0drv/power-r0drv.h"
+#endif
+
+#include "internal/initterm.h"
+#include "internal/mem.h"
+#include "internal/thread.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Count of current IPRT users.
+ * In ring-0 several drivers / kmods / kexts / wossnames may share the
+ * same runtime code. So, we need to keep count in order not to terminate
+ * it prematurely. */
+static int32_t volatile g_crtR0Users = 0;
+
+
+/**
+ * Initializes the ring-0 driver runtime library.
+ *
+ * @returns iprt status code.
+ * @param fReserved Flags reserved for the future.
+ */
+/**
+ * Initializes the ring-0 driver runtime library.
+ *
+ * Reference counted: only the first caller performs initialization, later
+ * callers just bump g_crtR0Users. On failure each successfully completed
+ * step is rolled back in reverse order.
+ *
+ * @returns iprt status code.
+ * @param   fReserved       Flags reserved for the future, must be 0.
+ */
+RTR0DECL(int) RTR0Init(unsigned fReserved)
+{
+    int rc;
+    uint32_t cNewUsers;
+    Assert(fReserved == 0); RT_NOREF_PV(fReserved);
+#ifndef RT_OS_SOLARIS   /* On Solaris our thread preemption information is only obtained in rtR0InitNative().*/
+    RT_ASSERT_PREEMPTIBLE();
+#endif
+
+    /*
+     * The first user initializes it.
+     * We rely on the module loader to ensure that there are no
+     * initialization races should two modules share the IPRT.
+     */
+    cNewUsers = ASMAtomicIncS32(&g_crtR0Users);
+    if (cNewUsers != 1)
+    {
+        if (cNewUsers > 1)
+            return VINF_SUCCESS;        /* already initialized by an earlier user */
+        /* Counter was negative before the increment (over-termination); undo. */
+        ASMAtomicDecS32(&g_crtR0Users);
+        return VERR_INTERNAL_ERROR_3;
+    }
+
+    /* Init native bits first, then layered subsystems; unwind on failure. */
+    rc = rtR0InitNative();
+    if (RT_SUCCESS(rc))
+    {
+#ifdef RTR0MEM_WITH_EF_APIS
+        rtR0MemEfInit();
+#endif
+        rc = rtThreadInit();
+        if (RT_SUCCESS(rc))
+        {
+#ifndef IN_GUEST /* play safe for now */
+            rc = rtR0MpNotificationInit();
+            if (RT_SUCCESS(rc))
+            {
+                rc = rtR0PowerNotificationInit();
+                if (RT_SUCCESS(rc))
+                    return rc;
+                rtR0MpNotificationTerm();
+            }
+#else
+            if (RT_SUCCESS(rc))
+                return rc;
+#endif
+            rtThreadTerm();
+        }
+#ifdef RTR0MEM_WITH_EF_APIS
+        rtR0MemEfTerm();
+#endif
+        rtR0TermNative();
+    }
+    /* NOTE(review): g_crtR0Users stays at 1 on failure — presumably callers
+     * never pair a failed RTR0Init with RTR0Term; confirm against callers. */
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0Init);
+
+
+/** Worker that tears down the subsystems in reverse order of RTR0Init. */
+static void rtR0Term(void)
+{
+    rtThreadTerm();
+#ifndef IN_GUEST /* play safe for now */
+    rtR0PowerNotificationTerm();
+    rtR0MpNotificationTerm();
+#endif
+#ifdef RTR0MEM_WITH_EF_APIS
+    rtR0MemEfTerm();
+#endif
+    rtR0TermNative();
+}
+
+
+/**
+ * Terminates the ring-0 driver runtime library.
+ *
+ * Reference counted: only the last user actually tears things down. An
+ * underflow (more terms than inits) is corrected by re-incrementing.
+ */
+RTR0DECL(void) RTR0Term(void)
+{
+    int32_t cNewUsers;
+    RT_ASSERT_PREEMPTIBLE();
+
+    cNewUsers = ASMAtomicDecS32(&g_crtR0Users);
+    Assert(cNewUsers >= 0);
+    if (cNewUsers == 0)
+        rtR0Term();
+    else if (cNewUsers < 0)
+        ASMAtomicIncS32(&g_crtR0Users);     /* over-termination; restore count */
+}
+RT_EXPORT_SYMBOL(RTR0Term);
+
+
+/* Note! Should *not* be exported since it's only for static linking. */
+/** Forced termination for statically linked users: asserts there is exactly
+ *  one user left, zeroes the count and tears down unconditionally. */
+RTR0DECL(void) RTR0TermForced(void)
+{
+    RT_ASSERT_PREEMPTIBLE();
+
+    AssertMsg(g_crtR0Users == 1, ("%d\n", g_crtR0Users));
+    ASMAtomicWriteS32(&g_crtR0Users, 0);
+
+    rtR0Term();
+}
diff --git a/src/VBox/Runtime/r0drv/linux/Makefile.kup b/src/VBox/Runtime/r0drv/linux/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/linux/RTLogWriteDebugger-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/RTLogWriteDebugger-r0drv-linux.c
new file mode 100644
index 00000000..6d782b56
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/RTLogWriteDebugger-r0drv-linux.c
@@ -0,0 +1,43 @@
+/* $Id: RTLogWriteDebugger-r0drv-linux.c $ */
+/** @file
+ * IPRT - Log To Debugger, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+
+/** Writes a log buffer to the kernel log via printk; pch need not be
+ *  zero-terminated, the "%.*s" precision bounds the output to cb bytes. */
+RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    printk("%.*s", (int)cb, pch);
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+RT_EXPORT_SYMBOL(RTLogWriteDebugger);
+
diff --git a/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
new file mode 100644
index 00000000..e7240915
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
@@ -0,0 +1,501 @@
+/* $Id: alloc-r0drv-linux.c $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include "r0drv/alloc-r0drv.h"
+
+
+#if (defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)) && !defined(RTMEMALLOC_EXEC_HEAP)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+/**
+ * Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
+ * memory in the moduel range. This is preferrable to the exec heap below.
+ */
+# define RTMEMALLOC_EXEC_VM_AREA
+# else
+/**
+ * We need memory in the module range (~2GB to ~0) this can only be obtained
+ * thru APIs that are not exported (see module_alloc()).
+ *
+ * So, we'll have to create a quick and dirty heap here using BSS memory.
+ * Very annoying and it's going to restrict us!
+ */
+# define RTMEMALLOC_EXEC_HEAP
+# endif
+#endif
+
+#ifdef RTMEMALLOC_EXEC_HEAP
+# include <iprt/heap.h>
+# include <iprt/spinlock.h>
+# include <iprt/errcore.h>
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+#ifdef RTMEMALLOC_EXEC_VM_AREA
+/**
+ * Extended header used for headers marked with RTMEMHDR_FLAG_EXEC_VM_AREA.
+ *
+ * This is used with allocating executable memory, for things like generated
+ * code and loaded modules.
+ */
+typedef struct RTMEMLNXHDREX
+{
+ /** The VM area for this allocation. */
+ struct vm_struct *pVmArea;
+ void *pvDummy;
+ /** The header we present to the generic API. */
+ RTMEMHDR Hdr;
+} RTMEMLNXHDREX;
+AssertCompileSize(RTMEMLNXHDREX, 32);
+/** Pointer to an extended memory header. */
+typedef RTMEMLNXHDREX *PRTMEMLNXHDREX;
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifdef RTMEMALLOC_EXEC_HEAP
+/** The heap. */
+static RTHEAPSIMPLE g_HeapExec = NIL_RTHEAPSIMPLE;
+/** Spinlock protecting the heap. */
+static RTSPINLOCK g_HeapExecSpinlock = NIL_RTSPINLOCK;
+#endif
+
+
+/**
+ * API for cleaning up the heap spinlock on IPRT termination.
+ * This is as RTMemExecDonate specific to AMD64 Linux/GNU.
+ *
+ * Only destroys the spinlock; the donated heap memory itself is owned by the
+ * donating module (see RTR0MemExecDonate) and is not freed here.
+ */
+DECLHIDDEN(void) rtR0MemExecCleanup(void)
+{
+#ifdef RTMEMALLOC_EXEC_HEAP
+    RTSpinlockDestroy(g_HeapExecSpinlock);
+    g_HeapExecSpinlock = NIL_RTSPINLOCK;
+#endif
+}
+
+
+/**
+ * Donate read+write+execute memory to the exec heap.
+ *
+ * This API is specific to AMD64 and Linux/GNU. A kernel module that desires to
+ * use RTMemExecAlloc on AMD64 Linux/GNU will have to donate some statically
+ * allocated memory in the module if it wishes for GCC generated code to work.
+ * GCC can only generate modules that work in the address range ~2GB to ~0
+ * currently.
+ *
+ * The API only accept one single donation.
+ *
+ * @returns IPRT status code.
+ * @retval  VERR_NOT_SUPPORTED if the code isn't enabled.
+ * @retval  VERR_WRONG_ORDER if a donation was already made.
+ * @param   pvMemory    Pointer to the memory block.
+ * @param   cb          The size of the memory block.
+ */
+RTR0DECL(int) RTR0MemExecDonate(void *pvMemory, size_t cb)
+{
+#ifdef RTMEMALLOC_EXEC_HEAP
+    int rc;
+    AssertReturn(g_HeapExec == NIL_RTHEAPSIMPLE, VERR_WRONG_ORDER);
+
+    /* Interrupt-safe lock: the heap is used from rtR0MemAllocEx/rtR0MemFree
+       which may run in atomic context. */
+    rc = RTSpinlockCreate(&g_HeapExecSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0MemExecDonate");
+    if (RT_SUCCESS(rc))
+    {
+        rc = RTHeapSimpleInit(&g_HeapExec, pvMemory, cb);
+        if (RT_FAILURE(rc))
+            rtR0MemExecCleanup();
+    }
+    return rc;
+#else
+    RT_NOREF_PV(pvMemory); RT_NOREF_PV(cb);
+    return VERR_NOT_SUPPORTED;
+#endif
+}
+RT_EXPORT_SYMBOL(RTR0MemExecDonate);
+
+
+
+#ifdef RTMEMALLOC_EXEC_VM_AREA
+/**
+ * Allocate executable kernel memory in the module range.
+ *
+ * Reserves a VM area between MODULES_VADDR and MODULES_END, backs it with
+ * individually allocated pages and maps them with PAGE_KERNEL_EXEC. The
+ * page array and count are stashed in the vm_struct so rtR0MemFree can
+ * unmap and free them later.
+ *
+ * @returns Pointer to a allocation header success. NULL on failure.
+ *
+ * @param   cb      The size the user requested (header gets prepended,
+ *                  total rounded up to whole pages).
+ */
+static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
+{
+    size_t const        cbAlloc = RT_ALIGN_Z(sizeof(RTMEMLNXHDREX) + cb, PAGE_SIZE);
+    size_t const        cPages  = cbAlloc >> PAGE_SHIFT;
+    struct page       **papPages;
+    struct vm_struct   *pVmArea;
+    size_t              iPage;
+
+    pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
+    if (!pVmArea)
+        return NULL;
+    pVmArea->nr_pages = 0; /* paranoia? */
+    pVmArea->pages    = NULL; /* paranoia? */
+
+    papPages = (struct page **)kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL | __GFP_NOWARN);
+    if (!papPages)
+    {
+        vunmap(pVmArea->addr);
+        return NULL;
+    }
+
+    /* Allocate the backing pages one by one; a break leaves iPage at the
+       number successfully allocated for the error path below. */
+    for (iPage = 0; iPage < cPages; iPage++)
+    {
+        papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN);
+        if (!papPages[iPage])
+            break;
+    }
+    if (iPage == cPages)
+    {
+        /*
+         * Map the pages.
+         *
+         * Not entirely sure we really need to set nr_pages and pages here, but
+         * they provide a very convenient place for storing something we need
+         * in the free function, if nothing else...
+         */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+        /* Older map_vm_area takes a struct page *** and advances it. */
+        struct page **papPagesIterator = papPages;
+# endif
+        pVmArea->nr_pages = cPages;
+        pVmArea->pages    = papPages;
+        if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC,
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+                         &papPagesIterator
+# else
+                         papPages
+# endif
+                         ))
+        {
+            /* The header lives at the start of the mapping itself. */
+            PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
+            pHdrEx->pVmArea     = pVmArea;
+            pHdrEx->pvDummy     = NULL;
+            return &pHdrEx->Hdr;
+        }
+        /* bail out */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+        pVmArea->nr_pages = papPagesIterator - papPages;
+# endif
+    }
+
+    vunmap(pVmArea->addr);
+
+    /* Free whatever pages were allocated before the failure. */
+    while (iPage-- > 0)
+        __free_page(papPages[iPage]);
+    kfree(papPages);
+
+    return NULL;
+}
+#endif /* RTMEMALLOC_EXEC_VM_AREA */
+
+
+/**
+ * OS specific allocation function.
+ *
+ * Picks the allocator by flags: executable requests go to the exec heap /
+ * module-range VM area / vmalloc depending on arch and config; ordinary
+ * requests use kmalloc for small or any-context allocations (GFP_ATOMIC when
+ * the caller may be in atomic context) and vmalloc otherwise.
+ *
+ * @returns IPRT status code; *ppHdr set on success.
+ * @param   cb      Requested byte count (header is added on top).
+ * @param   fFlags  RTMEMHDR_FLAG_XXX; updated with the allocator actually
+ *                  used so rtR0MemFree can route the free correctly.
+ * @param   ppHdr   Where to return the allocation header.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+    PRTMEMHDR pHdr;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * Allocate.
+     */
+    if (fFlags & RTMEMHDR_FLAG_EXEC)
+    {
+        /* Executable memory cannot be allocated from atomic context here. */
+        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
+            return VERR_NOT_SUPPORTED;
+
+#if defined(RT_ARCH_AMD64)
+# ifdef RTMEMALLOC_EXEC_HEAP
+        if (g_HeapExec != NIL_RTHEAPSIMPLE)
+        {
+            RTSpinlockAcquire(g_HeapExecSpinlock);
+            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
+            RTSpinlockRelease(g_HeapExecSpinlock);
+            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP;
+        }
+        else
+            pHdr = NULL;
+
+# elif defined(RTMEMALLOC_EXEC_VM_AREA)
+        pHdr = rtR0MemAllocExecVmArea(cb);
+        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;
+
+# else  /* !RTMEMALLOC_EXEC_HEAP */
+#  error "you don not want to go here..."
+        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
+# endif /* !RTMEMALLOC_EXEC_HEAP */
+
+#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
+        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
+#else
+        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
+#endif
+    }
+    else
+    {
+        if (
+#if 1 /* vmalloc has serious performance issues, avoid it. */
+               cb <= PAGE_SIZE*16 - sizeof(*pHdr)
+#else
+               cb <= PAGE_SIZE
+#endif
+            || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
+           )
+        {
+            fFlags |= RTMEMHDR_FLAG_KMALLOC;
+            pHdr = kmalloc(cb + sizeof(*pHdr),
+                           (fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC) ? (GFP_ATOMIC | __GFP_NOWARN)
+                                                                  : (GFP_KERNEL | __GFP_NOWARN));
+            /* Large kmallocs can fail due to fragmentation; fall back to
+               vmalloc when the caller can sleep. */
+            if (RT_UNLIKELY(   !pHdr
+                            && cb > PAGE_SIZE
+                            && !(fFlags & RTMEMHDR_FLAG_ANY_CTX) ))
+            {
+                fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
+                pHdr = vmalloc(cb + sizeof(*pHdr));
+            }
+        }
+        else
+            pHdr = vmalloc(cb + sizeof(*pHdr));
+    }
+    if (RT_UNLIKELY(!pHdr))
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+
+    /*
+     * Initialize.
+     */
+    pHdr->u32Magic  = RTMEMHDR_MAGIC;
+    pHdr->fFlags    = fFlags;
+    pHdr->cb        = cb;
+    pHdr->cbReq     = cb;
+
+    *ppHdr = pHdr;
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * OS specific free function.
+ *
+ * Routes the free to the allocator recorded in pHdr->fFlags by rtR0MemAllocEx
+ * (kmalloc, exec heap, exec VM area, or vmalloc).
+ *
+ * @param   pHdr    Header of the allocation to free.
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /* Invalidate the magic first to catch use-after-free / double free. */
+    pHdr->u32Magic += 1;
+    if (pHdr->fFlags & RTMEMHDR_FLAG_KMALLOC)
+        kfree(pHdr);
+#ifdef RTMEMALLOC_EXEC_HEAP
+    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_HEAP)
+    {
+        RTSpinlockAcquire(g_HeapExecSpinlock);
+        RTHeapSimpleFree(g_HeapExec, pHdr);
+        RTSpinlockRelease(g_HeapExecSpinlock);
+    }
+#endif
+#ifdef RTMEMALLOC_EXEC_VM_AREA
+    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_VM_AREA)
+    {
+        /* Recover the extended header, then unmap and free the individual
+           backing pages stashed in the vm_struct by rtR0MemAllocExecVmArea. */
+        PRTMEMLNXHDREX pHdrEx    = RT_FROM_MEMBER(pHdr, RTMEMLNXHDREX, Hdr);
+        size_t         iPage     = pHdrEx->pVmArea->nr_pages;
+        struct page  **papPages  = pHdrEx->pVmArea->pages;
+        void          *pvMapping = pHdrEx->pVmArea->addr;
+
+        vunmap(pvMapping);
+
+        while (iPage-- > 0)
+            __free_page(papPages[iPage]);
+        kfree(papPages);
+    }
+#endif
+    else
+        vfree(pHdr);
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+
+
+/**
+ * Compute order. Some functions allocate 2^order pages.
+ *
+ * The result is the smallest iOrder such that 2^iOrder >= cPages, i.e. the
+ * page count rounded up to the next power of two.
+ *
+ * @returns order.
+ * @param   cPages      Number of pages.
+ */
+static int CalcPowerOf2Order(unsigned long cPages)
+{
+    int             iOrder;
+    unsigned long   cTmp;
+
+    /* floor(log2(cPages)): count how many times cPages can be halved. */
+    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
+        ;
+    /* Round up when cPages is not an exact power of two. */
+    if (cPages & ~(1 << iOrder))
+        ++iOrder;
+
+    return iOrder;
+}
+
+
+/**
+ * Allocates physical contiguous memory (below 4GB).
+ * The allocation is page aligned and the content is undefined.
+ *
+ * Tries ZONE_DMA32 first where available, then falls back to ZONE_DMA (AMD64)
+ * or ZONE_NORMAL (x86). The pages are marked reserved and executable.
+ *
+ * @returns Pointer to the memory block. This is page aligned.
+ * @param   pPhys   Where to store the physical address.
+ * @param   cb      The allocation size in bytes. This is always
+ *                  rounded up to PAGE_SIZE.
+ */
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+    int             cOrder;
+    unsigned        cPages;
+    struct page    *paPages;
+    void           *pvRet;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * validate input.
+     */
+    Assert(VALID_PTR(pPhys));
+    Assert(cb > 0);
+
+    /*
+     * Allocate page pointer array.
+     */
+    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
+    cPages = cb >> PAGE_SHIFT;
+    cOrder = CalcPowerOf2Order(cPages);
+#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
+    /* ZONE_DMA32: 0-4GB */
+    paPages = alloc_pages(GFP_DMA32 | __GFP_NOWARN, cOrder);
+    if (!paPages)   /* the #if above makes this guard the fallback below */
+#endif
+#ifdef RT_ARCH_AMD64
+        /* ZONE_DMA; 0-16MB */
+        paPages = alloc_pages(GFP_DMA | __GFP_NOWARN, cOrder);
+#else
+        /* ZONE_NORMAL: 0-896MB */
+        paPages = alloc_pages(GFP_USER | __GFP_NOWARN, cOrder);
+#endif
+    if (paPages)
+    {
+        /*
+         * Reserve the pages and mark them executable.
+         */
+        unsigned iPage;
+        for (iPage = 0; iPage < cPages; iPage++)
+        {
+            Assert(!PageHighMem(&paPages[iPage]));
+            if (iPage + 1 < cPages)
+            {
+                /* Sanity: higher-order allocations are physically and (in the
+                   direct map) virtually contiguous. */
+                AssertMsg(  (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage])) + PAGE_SIZE
+                          ==  (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage + 1]))
+                          &&      page_to_phys(&paPages[iPage]) + PAGE_SIZE
+                              ==  page_to_phys(&paPages[iPage + 1]),
+                          ("iPage=%i cPages=%u [0]=%#llx,%p [1]=%#llx,%p\n", iPage, cPages,
+                           (long long)page_to_phys(&paPages[iPage]),     phys_to_virt(page_to_phys(&paPages[iPage])),
+                           (long long)page_to_phys(&paPages[iPage + 1]), phys_to_virt(page_to_phys(&paPages[iPage + 1])) ));
+            }
+
+            /* Reserved pages are skipped by swap-out and some mm accounting. */
+            SetPageReserved(&paPages[iPage]);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
+            MY_SET_PAGES_EXEC(&paPages[iPage], 1);
+#endif
+        }
+        *pPhys = page_to_phys(paPages);
+        pvRet = phys_to_virt(page_to_phys(paPages));
+    }
+    else
+        pvRet = NULL;
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return pvRet;
+}
+RT_EXPORT_SYMBOL(RTMemContAlloc);
+
+
+/**
+ * Frees memory allocated using RTMemContAlloc().
+ *
+ * Undoes the page reservation and executable attribute set at allocation
+ * time before returning the pages to the buddy allocator. NULL is a no-op.
+ *
+ * @param   pv      Pointer to return from RTMemContAlloc().
+ * @param   cb      The cb parameter passed to RTMemContAlloc().
+ */
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+    if (pv)
+    {
+        int             cOrder;
+        unsigned        cPages;
+        unsigned        iPage;
+        struct page    *paPages;
+        IPRT_LINUX_SAVE_EFL_AC();
+
+        /* validate */
+        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
+        Assert(cb > 0);
+
+        /* calc order and get pages; cb must be rounded exactly like in the
+           alloc function so the order matches. */
+        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
+        cPages = cb >> PAGE_SHIFT;
+        cOrder = CalcPowerOf2Order(cPages);
+        paPages = virt_to_page(pv);
+
+        /*
+         * Restore page attributes freeing the pages.
+         */
+        for (iPage = 0; iPage < cPages; iPage++)
+        {
+            ClearPageReserved(&paPages[iPage]);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
+            MY_SET_PAGES_NOEXEC(&paPages[iPage], 1);
+#endif
+        }
+        __free_pages(paPages, cOrder);
+        IPRT_LINUX_RESTORE_EFL_AC();
+    }
+}
+RT_EXPORT_SYMBOL(RTMemContFree);
+
diff --git a/src/VBox/Runtime/r0drv/linux/assert-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/assert-r0drv-linux.c
new file mode 100644
index 00000000..fd42aa5d
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/assert-r0drv-linux.c
@@ -0,0 +1,74 @@
+/* $Id: assert-r0drv-linux.c $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, Linux.
+ */
+
+/*
+ * Copyright (C) 2007-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/string.h>
+#include <iprt/stdarg.h>
+#include <iprt/asm.h>
+
+#include "internal/assert.h"
+
+
+/** Native assertion worker no. 1: writes the failed expression and its source
+ *  location to the kernel log at emergency level. */
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    printk(KERN_EMERG
+           "\r\n!!Assertion Failed!!\r\n"
+           "Expression: %s\r\n"
+           "Location : %s(%d) %s\r\n",
+           pszExpr, pszFile, uLine, pszFunction);
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+
+/** Native assertion worker no. 2: formats the custom assertion message
+ *  (truncated to 255 characters) and writes it to the kernel log at
+ *  emergency level. */
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+    char szMsg[256];
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
+    szMsg[sizeof(szMsg) - 1] = '\0'; /* belt and braces termination */
+    printk(KERN_EMERG "%s", szMsg);
+
+    NOREF(fInitial); /* not needed by the Linux implementation */
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+
+/** Brings the system down via panic(), using the two global assertion
+ *  message buffers as the panic string. */
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+    panic("%s%s", g_szRTAssertMsg1, g_szRTAssertMsg2);
+}
+RT_EXPORT_SYMBOL(RTR0AssertPanicSystem);
+
diff --git a/src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c
new file mode 100644
index 00000000..aeb4d97e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c
@@ -0,0 +1,137 @@
+/* $Id: initterm-r0drv-linux.c $ */
+/** @file
+ * IPRT - Initialization & Termination, R0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+#include "internal/initterm.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** The IPRT work queue. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+static struct workqueue_struct *g_prtR0LnxWorkQueue;
+#else
+static DECLARE_TASK_QUEUE(g_rtR0LnxWorkQueue);
+#endif
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+/* in alloc-r0drv-linux.c */
+DECLHIDDEN(void) rtR0MemExecCleanup(void);
+
+
+/**
+ * Pushes an item onto the IPRT work queue.
+ *
+ * @param pWork The work item.
+ * @param pfnWorker The callback function. It will be called back
+ * with @a pWork as argument.
+ */
+DECLHIDDEN(void) rtR0LnxWorkqueuePush(RTR0LNXWORKQUEUEITEM *pWork, void (*pfnWorker)(RTR0LNXWORKQUEUEITEM *))
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+    /* 2.6.20 changed INIT_WORK to the two-argument form where the work item
+       itself is handed to the callback. */
+    INIT_WORK(pWork, pfnWorker);
+# else
+    INIT_WORK(pWork, (void (*)(void *))pfnWorker, pWork);
+# endif
+    queue_work(g_prtR0LnxWorkQueue, pWork);
+#else
+    /* Pre-2.5.41: no workqueues, fall back on the old task queue mechanism. */
+    INIT_TQUEUE(pWork, (void (*)(void *))pfnWorker, pWork);
+    queue_task(pWork, &g_rtR0LnxWorkQueue);
+#endif
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+
+/**
+ * Flushes all items in the IPRT work queue.
+ *
+ * @remarks This is mostly for 2.4.x compatibility. Must not be called from
+ *          atomic contexts or with unnecessary locks held.
+ */
+DECLHIDDEN(void) rtR0LnxWorkqueueFlush(void)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+    /* Blocks until all currently queued work items have been executed. */
+    flush_workqueue(g_prtR0LnxWorkQueue);
+#else
+    /* 2.4.x: run the pending task queue items now. */
+    run_task_queue(&g_rtR0LnxWorkQueue);
+#endif
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+
+/** Initializes the Linux specific ring-0 IPRT bits: creates the work queue
+ *  used by rtR0LnxWorkqueuePush (2.5.41+ kernels only).
+ *  @returns VINF_SUCCESS or VERR_NO_MEMORY if the queue could not be created. */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+    int rc = VINF_SUCCESS;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
+    g_prtR0LnxWorkQueue = create_workqueue("iprt-VBoxWQueue");
+    #else
+    /* NOTE(review): shorter name for pre-2.6.13 kernels - presumably a
+       workqueue name length restriction in older kernels; confirm. */
+    g_prtR0LnxWorkQueue = create_workqueue("iprt-VBoxQ");
+    #endif
+    if (!g_prtR0LnxWorkQueue)
+        rc = VERR_NO_MEMORY;
+#endif
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/** Counterpart to rtR0InitNative: flushes any pending work items, destroys
+ *  the work queue (2.5.41+), and lets the exec memory allocator clean up. */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /* Flush before destroying so no worker runs against a dead queue. */
+    rtR0LnxWorkqueueFlush();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+    destroy_workqueue(g_prtR0LnxWorkQueue);
+    g_prtR0LnxWorkQueue = NULL;
+#endif
+
+    rtR0MemExecCleanup();
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+}
+
diff --git a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
new file mode 100644
index 00000000..d6f39400
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
@@ -0,0 +1,1768 @@
+/* $Id: memobj-r0drv-linux.c $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+
+#include <iprt/memobj.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/process.h>
+#include <iprt/string.h>
+#include "internal/memobj.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/* early 2.6 kernels */
+#ifndef PAGE_SHARED_EXEC
+# define PAGE_SHARED_EXEC PAGE_SHARED
+#endif
+#ifndef PAGE_READONLY_EXEC
+# define PAGE_READONLY_EXEC PAGE_READONLY
+#endif
+
+/*
+ * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
+ * track_pfn_vma_new() is apparently not defined for non-RAM pages.
+ * It should be safe to use vm_insert_page() on older kernels as well.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+# define VBOX_USE_INSERT_PAGE
+#endif
+#if defined(CONFIG_X86_PAE) \
+ && ( defined(HAVE_26_STYLE_REMAP_PAGE_RANGE) \
+ || ( LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)))
+# define VBOX_USE_PAE_HACK
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The Linux version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJLNX
+{
+    /** The core structure. */
+    RTR0MEMOBJINTERNAL Core;
+    /** Set if the allocation is contiguous.
+     * This means it has to be given back as one chunk. */
+    bool fContiguous;
+    /** Set if we've vmap'ed the memory into ring-0. */
+    bool fMappedToRing0;
+    /** The number of pages in the apPages array. */
+    size_t cPages;
+    /** Array of struct page pointers. (variable size, allocated as part of
+     * the object - see rtR0MemObjLinuxAllocPages) */
+    struct page *apPages[1];
+} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
+
+
+static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);
+
+
+/**
+ * Helper that converts from a RTR0PROCESS handle to a linux task.
+ *
+ * @returns The corresponding Linux task.
+ * @param R0Process IPRT ring-0 process handle.
+ */
+static struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
+{
+    /** @todo fix rtR0ProcessToLinuxTask!! */
+    /** @todo many (all?) callers currently assume that we return 'current'! */
+    /* Only the calling process can be resolved; any other handle yields NULL. */
+    return R0Process == RTR0ProcHandleSelf() ? current : NULL;
+}
+
+
+/**
+ * Compute order. Some functions allocate 2^order pages.
+ *
+ * @returns order.
+ * @param cPages Number of pages.
+ */
+static int rtR0MemObjLinuxOrder(size_t cPages)
+{
+    int iOrder;
+    size_t cTmp;
+
+    /* iOrder = floor(log2(cPages)) ... */
+    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
+        ;
+    /* ... rounded up when cPages is not an exact power of two. */
+    if (cPages & ~((size_t)1 << iOrder))
+        ++iOrder;
+
+    return iOrder;
+}
+
+
+/**
+ * Converts from RTMEM_PROT_* to Linux PAGE_*.
+ *
+ * @returns Linux page protection constant.
+ * @param fProt The IPRT protection mask.
+ * @param fKernel Whether it applies to kernel or user space.
+ */
+static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
+{
+    switch (fProt)
+    {
+        default:
+            AssertMsgFailed(("%#x %d\n", fProt, fKernel));
+            /* fall thru - treat unknown combinations as no access. */
+        case RTMEM_PROT_NONE:
+            return PAGE_NONE;
+
+        case RTMEM_PROT_READ:
+            return fKernel ? PAGE_KERNEL_RO : PAGE_READONLY;
+
+        case RTMEM_PROT_WRITE:
+        case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
+            return fKernel ? PAGE_KERNEL : PAGE_SHARED;
+
+        case RTMEM_PROT_EXEC:
+        case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+            if (fKernel)
+            {
+                /* Start from the kernel exec protection and strip the write bit
+                   to get read+exec without write. */
+                pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
+                pgprot_val(fPg) &= ~_PAGE_RW;
+                return fPg;
+            }
+            return PAGE_READONLY_EXEC;
+#else
+            return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_READONLY_EXEC;
+#endif
+
+        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
+        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
+            return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_SHARED_EXEC;
+    }
+}
+
+
+/**
+ * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
+ * an empty user space mapping.
+ *
+ * We acquire the mmap_sem of the task!
+ *
+ * @returns Pointer to the mapping.
+ * (void *)-1 on failure.
+ * @param R3PtrFixed (RTR3PTR)-1 if anywhere, otherwise a specific location.
+ * @param cb The size of the mapping.
+ * @param uAlignment The alignment of the mapping.
+ * @param pTask The Linux task to create this mapping in.
+ * @param fProt The RTMEM_PROT_* mask.
+ */
+static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
+{
+    unsigned fLnxProt;
+    unsigned long ulAddr;
+
+    Assert(pTask == current); /* do_mmap */
+    RT_NOREF_PV(pTask);
+
+    /*
+     * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
+     */
+    fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
+    if (fProt == RTMEM_PROT_NONE)
+        fLnxProt = PROT_NONE;
+    else
+    {
+        fLnxProt = 0;
+        if (fProt & RTMEM_PROT_READ)
+            fLnxProt |= PROT_READ;
+        if (fProt & RTMEM_PROT_WRITE)
+            fLnxProt |= PROT_WRITE;
+        if (fProt & RTMEM_PROT_EXEC)
+            fLnxProt |= PROT_EXEC;
+    }
+
+    if (R3PtrFixed != (RTR3PTR)-1)
+    {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+        /* 3.5+: vm_mmap takes the mmap semaphore itself. */
+        ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
+#else
+        down_write(&pTask->mm->mmap_sem);
+        ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
+        up_write(&pTask->mm->mmap_sem);
+#endif
+    }
+    else
+    {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+        ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
+#else
+        down_write(&pTask->mm->mmap_sem);
+        ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
+        up_write(&pTask->mm->mmap_sem);
+#endif
+        if ( !(ulAddr & ~PAGE_MASK)
+            && (ulAddr & (uAlignment - 1)))
+        {
+            /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
+             *        up alignment gaps. This is of course complicated by fragmentation (which we might have caused
+             *        ourselves) and further by there being two mmap strategies (top / bottom). */
+            /* For now, just ignore uAlignment requirements... */
+        }
+    }
+
+
+    if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
+        return (void *)-1;
+    return (void *)ulAddr;
+}
+
+
+/**
+ * Worker that destroys a user space mapping.
+ * Undoes what rtR0MemObjLinuxDoMmap did.
+ *
+ * We acquire the mmap_sem of the task!
+ *
+ * @param pv The ring-3 mapping.
+ * @param cb The size of the mapping.
+ * @param pTask The Linux task to destroy this mapping in.
+ */
+static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTask)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+    /* 3.5+: vm_munmap takes the mmap semaphore itself and works on current. */
+    Assert(pTask == current); RT_NOREF_PV(pTask);
+    vm_munmap((unsigned long)pv, cb);
+#elif defined(USE_RHEL4_MUNMAP)
+    /* RHEL4 variant of do_munmap with an extra trailing parameter. */
+    down_write(&pTask->mm->mmap_sem);
+    do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
+    up_write(&pTask->mm->mmap_sem);
+#else
+    down_write(&pTask->mm->mmap_sem);
+    do_munmap(pTask->mm, (unsigned long)pv, cb);
+    up_write(&pTask->mm->mmap_sem);
+#endif
+}
+
+
+/**
+ * Internal worker that allocates physical pages and creates the memory object for them.
+ *
+ * @returns IPRT status code.
+ * @param ppMemLnx Where to store the memory object pointer.
+ * @param enmType The object type.
+ * @param cb The number of bytes to allocate.
+ * @param uAlignment The alignment of the physical memory.
+ * Only valid if fContiguous == true, ignored otherwise.
+ * @param fFlagsLnx The page allocation flags (GPFs).
+ * @param fContiguous Whether the allocation must be contiguous.
+ * @param rcNoMem What to return when we're out of pages.
+ */
+static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
+                                     size_t uAlignment, unsigned fFlagsLnx, bool fContiguous, int rcNoMem)
+{
+    size_t iPage;
+    /* cb is expected to be page aligned by the callers (cb >> PAGE_SHIFT). */
+    size_t const cPages = cb >> PAGE_SHIFT;
+    struct page *paPages;
+
+    /*
+     * Allocate a memory object structure that's large enough to contain
+     * the page pointer array.
+     */
+    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
+    if (!pMemLnx)
+        return VERR_NO_MEMORY;
+    pMemLnx->cPages = cPages;
+
+    /* Large requests (> 1MB with 4K pages) get gentler GFP flags. */
+    if (cPages > 255)
+    {
+# ifdef __GFP_REPEAT
+        /* Try hard to allocate the memory, but the allocation attempt might fail. */
+        fFlagsLnx |= __GFP_REPEAT;
+# endif
+# ifdef __GFP_NOMEMALLOC
+        /* Introduced with Linux 2.6.12: Don't use emergency reserves */
+        fFlagsLnx |= __GFP_NOMEMALLOC;
+# endif
+    }
+
+    /*
+     * Allocate the pages.
+     * For small allocations we'll try contiguous first and then fall back on page by page.
+     */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+    if ( fContiguous
+        || cb <= PAGE_SIZE * 2)
+    {
+        /* Single higher-order allocation; apPages entries then alias into it. */
+# ifdef VBOX_USE_INSERT_PAGE
+        paPages = alloc_pages(fFlagsLnx | __GFP_COMP | __GFP_NOWARN, rtR0MemObjLinuxOrder(cPages));
+# else
+        paPages = alloc_pages(fFlagsLnx | __GFP_NOWARN, rtR0MemObjLinuxOrder(cPages));
+# endif
+        if (paPages)
+        {
+            fContiguous = true;
+            for (iPage = 0; iPage < cPages; iPage++)
+                pMemLnx->apPages[iPage] = &paPages[iPage];
+        }
+        else if (fContiguous)
+        {
+            /* Contiguity was required and we could not deliver it. */
+            rtR0MemObjDelete(&pMemLnx->Core);
+            return rcNoMem;
+        }
+    }
+
+    if (!fContiguous)
+    {
+        for (iPage = 0; iPage < cPages; iPage++)
+        {
+            pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx | __GFP_NOWARN);
+            if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
+            {
+                /* Unwind: release the pages allocated so far. */
+                while (iPage-- > 0)
+                    __free_page(pMemLnx->apPages[iPage]);
+                rtR0MemObjDelete(&pMemLnx->Core);
+                return rcNoMem;
+            }
+        }
+    }
+
+#else /* < 2.4.22 */
+    /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
+    paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
+    if (!paPages)
+    {
+        rtR0MemObjDelete(&pMemLnx->Core);
+        return rcNoMem;
+    }
+    for (iPage = 0; iPage < cPages; iPage++)
+    {
+        pMemLnx->apPages[iPage] = &paPages[iPage];
+        MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
+        if (PageHighMem(pMemLnx->apPages[iPage]))
+            BUG();
+    }
+
+    fContiguous = true;
+#endif /* < 2.4.22 */
+    pMemLnx->fContiguous = fContiguous;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+    /*
+     * Reserve the pages.
+     *
+     * Linux >= 4.5 with CONFIG_DEBUG_VM panics when setting PG_reserved on compound
+     * pages. According to Michal Hocko this shouldn't be necessary anyway because
+     * as pages which are not on the LRU list are never evictable.
+     */
+    for (iPage = 0; iPage < cPages; iPage++)
+        SetPageReserved(pMemLnx->apPages[iPage]);
+#endif
+
+    /*
+     * Note that the physical address of memory allocated with alloc_pages(flags, order)
+     * is always 2^(PAGE_SHIFT+order)-aligned.
+     */
+    if ( fContiguous
+        && uAlignment > PAGE_SIZE)
+    {
+        /*
+         * Check for alignment constraints. The physical address of memory allocated with
+         * alloc_pages(flags, order) is always 2^(PAGE_SHIFT+order)-aligned.
+         */
+        if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & (uAlignment - 1)))
+        {
+            /*
+             * This should never happen!
+             */
+            printk("rtR0MemObjLinuxAllocPages(cb=0x%lx, uAlignment=0x%lx): alloc_pages(..., %d) returned physical memory at 0x%lx!\n",
+                   (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0]));
+            rtR0MemObjLinuxFreePages(pMemLnx);
+            return rcNoMem;
+        }
+    }
+
+    *ppMemLnx = pMemLnx;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
+ *
+ * This method does NOT free the object.
+ *
+ * @param pMemLnx The object which physical pages should be freed.
+ */
+static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
+{
+    size_t iPage = pMemLnx->cPages;
+    if (iPage > 0)
+    {
+        /*
+         * Restore the page flags.
+         */
+        while (iPage-- > 0)
+        {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+            /*
+             * See SetPageReserved() in rtR0MemObjLinuxAllocPages()
+             */
+            ClearPageReserved(pMemLnx->apPages[iPage]);
+#endif
+            /* MY_SET_PAGES_EXEC was only applied on pre-2.4.22 kernels. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#else
+            MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
+#endif
+        }
+
+        /*
+         * Free the pages.
+         */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+        if (!pMemLnx->fContiguous)
+        {
+            iPage = pMemLnx->cPages;
+            while (iPage-- > 0)
+                __free_page(pMemLnx->apPages[iPage]);
+        }
+        else
+#endif
+            __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));
+
+        pMemLnx->cPages = 0; /* mark page-less so a repeated call is harmless */
+    }
+}
+
+
+/**
+ * Maps the allocation into ring-0.
+ *
+ * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
+ *
+ * Contiguous mappings that isn't in 'high' memory will already be mapped into kernel
+ * space, so we'll use that mapping if possible. If execute access is required, we'll
+ * play safe and do our own mapping.
+ *
+ * @returns IPRT status code.
+ * @param pMemLnx The linux memory object to map.
+ * @param fExecutable Whether execute access is required.
+ */
+static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
+{
+    int rc = VINF_SUCCESS;
+
+    /*
+     * Choose mapping strategy.
+     */
+    bool fMustMap = fExecutable
+                 || !pMemLnx->fContiguous;
+    if (!fMustMap)
+    {
+        /* Contiguous + non-exec: still must vmap if any page lives in highmem,
+           since highmem pages have no permanent kernel mapping. */
+        size_t iPage = pMemLnx->cPages;
+        while (iPage-- > 0)
+            if (PageHighMem(pMemLnx->apPages[iPage]))
+            {
+                fMustMap = true;
+                break;
+            }
+    }
+
+    Assert(!pMemLnx->Core.pv);
+    Assert(!pMemLnx->fMappedToRing0);
+
+    if (fMustMap)
+    {
+        /*
+         * Use vmap - 2.4.22 and later.
+         */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+        pgprot_t fPg;
+        /* Build the protection from scratch; NX is cleared only when exec is wanted. */
+        pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
+# ifdef _PAGE_NX
+        if (!fExecutable)
+            pgprot_val(fPg) |= _PAGE_NX;
+# endif
+
+# ifdef VM_MAP
+        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
+# else
+        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
+# endif
+        if (pMemLnx->Core.pv)
+            pMemLnx->fMappedToRing0 = true;
+        else
+            rc = VERR_MAP_FAILED;
+#else /* < 2.4.22 */
+        rc = VERR_NOT_SUPPORTED;
+#endif
+    }
+    else
+    {
+        /*
+         * Use the kernel RAM mapping.
+         */
+        pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
+        Assert(pMemLnx->Core.pv);
+    }
+
+    return rc;
+}
+
+
+/**
+ * Undoes what rtR0MemObjLinuxVMap() did.
+ *
+ * @param pMemLnx The linux memory object.
+ */
+static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+    if (pMemLnx->fMappedToRing0)
+    {
+        Assert(pMemLnx->Core.pv);
+        vunmap(pMemLnx->Core.pv);
+        pMemLnx->fMappedToRing0 = false;
+    }
+#else /* < 2.4.22 */
+    Assert(!pMemLnx->fMappedToRing0);
+#endif
+    /* Forget the pointer even when it was the kernel linear mapping. */
+    pMemLnx->Core.pv = NULL;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
+
+    /*
+     * Release any memory that we've allocated or locked.
+     */
+    switch (pMemLnx->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_CONT:
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            /* Allocation types: drop the ring-0 mapping, then the pages. */
+            rtR0MemObjLinuxVUnmap(pMemLnx);
+            rtR0MemObjLinuxFreePages(pMemLnx);
+            break;
+
+        case RTR0MEMOBJTYPE_LOCK:
+            if (pMemLnx->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+            {
+                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
+                size_t iPage;
+                Assert(pTask);
+                if (pTask && pTask->mm)
+                    down_read(&pTask->mm->mmap_sem);
+
+                iPage = pMemLnx->cPages;
+                while (iPage-- > 0)
+                {
+                    /* Mark pages dirty before unpinning so modified data isn't lost. */
+                    if (!PageReserved(pMemLnx->apPages[iPage]))
+                        SetPageDirty(pMemLnx->apPages[iPage]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+                    put_page(pMemLnx->apPages[iPage]);
+#else
+                    page_cache_release(pMemLnx->apPages[iPage]);
+#endif
+                }
+
+                if (pTask && pTask->mm)
+                    up_read(&pTask->mm->mmap_sem);
+            }
+            /* else: kernel memory - nothing to do here. */
+            break;
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            Assert(pMemLnx->Core.pv);
+            if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
+            {
+                /* NOTE(review): reads u.Lock although the object is RES_VIRT; the
+                   union members overlay so the value matches - confirm intended. */
+                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
+                Assert(pTask);
+                if (pTask && pTask->mm)
+                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
+            }
+            else
+            {
+                vunmap(pMemLnx->Core.pv);
+
+                /* Kernel reservations carry exactly one dummy page. */
+                Assert(pMemLnx->cPages == 1 && pMemLnx->apPages[0] != NULL);
+                __free_page(pMemLnx->apPages[0]);
+                pMemLnx->apPages[0] = NULL;
+                pMemLnx->cPages = 0;
+            }
+            pMemLnx->Core.pv = NULL;
+            break;
+
+        case RTR0MEMOBJTYPE_MAPPING:
+            Assert(pMemLnx->cPages == 0); Assert(pMemLnx->Core.pv);
+            if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
+            {
+                struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
+                Assert(pTask);
+                if (pTask && pTask->mm)
+                    rtR0MemObjLinuxDoMunmap(pMemLnx->Core.pv, pMemLnx->Core.cb, pTask);
+            }
+            else
+                vunmap(pMemLnx->Core.pv);
+            pMemLnx->Core.pv = NULL;
+            break;
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", pMemLnx->Core.enmType));
+            return VERR_INTERNAL_ERROR;
+    }
+    IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+    return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    PRTR0MEMOBJLNX pMemLnx;
+    int rc;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+    /* HIGHMEM pages are fine here because rtR0MemObjLinuxVMap handles them. */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_HIGHUSER,
+                                   false /* non-contiguous */, VERR_NO_MEMORY);
+#else
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_USER,
+                                   false /* non-contiguous */, VERR_NO_MEMORY);
+#endif
+    if (RT_SUCCESS(rc))
+    {
+        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
+        if (RT_SUCCESS(rc))
+        {
+            *ppMem = &pMemLnx->Core;
+            IPRT_LINUX_RESTORE_EFL_AC();
+            return rc;
+        }
+
+        /* Mapping failed: release the pages and the object. */
+        rtR0MemObjLinuxFreePages(pMemLnx);
+        rtR0MemObjDelete(&pMemLnx->Core);
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    PRTR0MEMOBJLNX pMemLnx;
+    int rc;
+
+    /* Try to avoid GFP_DMA. GFP_DMA32 was introduced with Linux 2.6.15. */
+#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
+    /* ZONE_DMA32: 0-4GB */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA32,
+                                   false /* non-contiguous */, VERR_NO_LOW_MEMORY);
+    if (RT_FAILURE(rc))
+#endif
+#ifdef RT_ARCH_AMD64
+    /* ZONE_DMA: 0-16MB */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA,
+                                   false /* non-contiguous */, VERR_NO_LOW_MEMORY);
+#else
+# ifdef CONFIG_X86_PAE
+    /* NOTE(review): empty conditional left behind; PAE configs fall through
+       to the ZONE_NORMAL attempt below as well. */
+# endif
+    /* ZONE_NORMAL: 0-896MB */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_USER,
+                                   false /* non-contiguous */, VERR_NO_LOW_MEMORY);
+#endif
+    if (RT_SUCCESS(rc))
+    {
+        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
+        if (RT_SUCCESS(rc))
+        {
+            *ppMem = &pMemLnx->Core;
+            IPRT_LINUX_RESTORE_EFL_AC();
+            return rc;
+        }
+
+        rtR0MemObjLinuxFreePages(pMemLnx);
+        rtR0MemObjDelete(&pMemLnx->Core);
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    PRTR0MEMOBJLNX pMemLnx;
+    int rc;
+
+#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
+    /* ZONE_DMA32: 0-4GB */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA32,
+                                   true /* contiguous */, VERR_NO_CONT_MEMORY);
+    if (RT_FAILURE(rc))
+#endif
+#ifdef RT_ARCH_AMD64
+    /* ZONE_DMA: 0-16MB */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA,
+                                   true /* contiguous */, VERR_NO_CONT_MEMORY);
+#else
+    /* ZONE_NORMAL (32-bit hosts): 0-896MB */
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_USER,
+                                   true /* contiguous */, VERR_NO_CONT_MEMORY);
+#endif
+    if (RT_SUCCESS(rc))
+    {
+        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
+        if (RT_SUCCESS(rc))
+        {
+#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(CONFIG_HIGHMEM64G))
+            /* Strict builds: double-check every page really is below 4GB. */
+            size_t iPage = pMemLnx->cPages;
+            while (iPage-- > 0)
+                Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
+#endif
+            pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
+            *ppMem = &pMemLnx->Core;
+            IPRT_LINUX_RESTORE_EFL_AC();
+            return rc;
+        }
+
+        rtR0MemObjLinuxFreePages(pMemLnx);
+        rtR0MemObjDelete(&pMemLnx->Core);
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the memory object pointer on success.
+ * @param enmType The object type.
+ * @param cb The size of the allocation.
+ * @param uAlignment The alignment of the physical memory.
+ * Only valid for fContiguous == true, ignored otherwise.
+ * @param PhysHighest See rtR0MemObjNativeAllocPhys.
+ * @param fGfp The Linux GFP flags to use for the allocation.
+ */
+static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
+                                        size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, unsigned fGfp)
+{
+    PRTR0MEMOBJLNX pMemLnx;
+    int rc;
+
+    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, uAlignment, fGfp,
+                                   enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */,
+                                   VERR_NO_PHYS_MEMORY);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /*
+     * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
+     * The GFP zone is only a hint; verify every page is below PhysHighest.
+     */
+    if (PhysHighest != NIL_RTHCPHYS)
+    {
+        size_t iPage = pMemLnx->cPages;
+        while (iPage-- > 0)
+            if (page_to_phys(pMemLnx->apPages[iPage]) > PhysHighest)
+            {
+                rtR0MemObjLinuxFreePages(pMemLnx);
+                rtR0MemObjDelete(&pMemLnx->Core);
+                return VERR_NO_MEMORY;
+            }
+    }
+
+    /*
+     * Complete the object.
+     */
+    if (enmType == RTR0MEMOBJTYPE_PHYS)
+    {
+        pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
+        pMemLnx->Core.u.Phys.fAllocated = true;
+    }
+    *ppMem = &pMemLnx->Core;
+    return rc;
+}
+
+
+/**
+ * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the memory object pointer on success.
+ * @param enmType The object type.
+ * @param cb The size of the allocation.
+ * @param uAlignment The alignment of the physical memory.
+ * Only valid for enmType == RTR0MEMOBJTYPE_PHYS, ignored otherwise.
+ * @param PhysHighest See rtR0MemObjNativeAllocPhys.
+ */
+static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
+                                       size_t cb, size_t uAlignment, RTHCPHYS PhysHighest)
+{
+    int rc;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * There are two clear cases and that's the <=16MB and anything-goes ones.
+     * When the physical address limit is somewhere in-between those two we'll
+     * just have to try, starting with HIGHUSER and working our way thru the
+     * different types, hoping we'll get lucky.
+     *
+     * We should probably move this physical address restriction logic up to
+     * the page alloc function as it would be more efficient there. But since
+     * we don't expect this to be a performance issue just yet it can wait.
+     */
+    if (PhysHighest == NIL_RTHCPHYS)
+        /* ZONE_HIGHMEM: the whole physical memory */
+        rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
+    else if (PhysHighest <= _1M * 16)
+        /* ZONE_DMA: 0-16MB */
+        rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
+    else
+    {
+        rc = VERR_NO_MEMORY; /* seed as failure so the fallback chain below always starts */
+        if (RT_FAILURE(rc))
+            /* ZONE_HIGHMEM: the whole physical memory */
+            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
+        if (RT_FAILURE(rc))
+            /* ZONE_NORMAL: 0-896MB */
+            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_USER);
+#ifdef GFP_DMA32
+        if (RT_FAILURE(rc))
+            /* ZONE_DMA32: 0-4GB */
+            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA32);
+#endif
+        if (RT_FAILURE(rc))
+            /* ZONE_DMA: 0-16MB */
+            rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
+    }
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Translates a kernel virtual address to a linux page structure by walking the
+ * page tables.
+ *
+ * @note We do assume that the page tables will not change as we are walking
+ *       them. This assumption is rather forced by the fact that I could not
+ *       immediately see any way of preventing this from happening. So, we
+ *       take some extra care when accessing them.
+ *
+ *       Because of this, we don't want to use this function on memory where
+ *       attribute changes to nearby pages is likely to cause large pages to
+ *       be used or split up. So, don't use this for the linear mapping of
+ *       physical memory.
+ *
+ * @returns Pointer to the page structure or NULL if it could not be found.
+ * @param   pv      The kernel virtual address.
+ */
+static struct page *rtR0MemObjLinuxVirtToPage(void *pv)
+{
+    unsigned long   ulAddr = (unsigned long)pv;
+    unsigned long   pfn;
+    struct page    *pPage;
+    pte_t          *pEntry;
+    /* Each table level is copied into this union once and then examined, so
+       a concurrent change to the live entry cannot give us inconsistent
+       reads of the same entry (see the @note above). */
+    union
+    {
+        pgd_t       Global;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+        p4d_t       Four;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+        pud_t       Upper;
+#endif
+        pmd_t       Middle;
+        pte_t       Entry;
+    } u;
+
+    /* Should this happen in a situation this code will be called in? And if
+     * so, can it change under our feet? See also
+     * "Documentation/vm/active_mm.txt" in the kernel sources. */
+    if (RT_UNLIKELY(!current->active_mm))
+        return NULL;
+    u.Global = *pgd_offset(current->active_mm, ulAddr);
+    if (RT_UNLIKELY(pgd_none(u.Global)))
+        return NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+    u.Four = *p4d_offset(&u.Global, ulAddr);
+    if (RT_UNLIKELY(p4d_none(u.Four)))
+        return NULL;
+    if (p4d_large(u.Four))
+    {
+        /* Large p4d-level mapping: take the base page frame and add the
+           offset of ulAddr within the large page. */
+        pPage = p4d_page(u.Four);
+        AssertReturn(pPage, NULL);
+        pfn = page_to_pfn(pPage);      /* doing the safe way... */
+        AssertCompile(P4D_SHIFT - PAGE_SHIFT < 31);
+        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (P4D_SHIFT - PAGE_SHIFT)) - 1);
+        return pfn_to_page(pfn);
+    }
+    u.Upper = *pud_offset(&u.Four, ulAddr);
+# else /* < 4.12 */
+    u.Upper = *pud_offset(&u.Global, ulAddr);
+# endif /* < 4.12 */
+    if (RT_UNLIKELY(pud_none(u.Upper)))
+        return NULL;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+    if (pud_large(u.Upper))
+    {
+        /* Large pud-level (e.g. 1GB) mapping: same base + offset treatment. */
+        pPage = pud_page(u.Upper);
+        AssertReturn(pPage, NULL);
+        pfn = page_to_pfn(pPage);      /* doing the safe way... */
+        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PUD_SHIFT - PAGE_SHIFT)) - 1);
+        return pfn_to_page(pfn);
+    }
+# endif
+    u.Middle = *pmd_offset(&u.Upper, ulAddr);
+#else  /* < 2.6.11 */
+    u.Middle = *pmd_offset(&u.Global, ulAddr);
+#endif /* < 2.6.11 */
+    if (RT_UNLIKELY(pmd_none(u.Middle)))
+        return NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+    if (pmd_large(u.Middle))
+    {
+        /* Large pmd-level (e.g. 2MB/4MB) mapping. */
+        pPage = pmd_page(u.Middle);
+        AssertReturn(pPage, NULL);
+        pfn = page_to_pfn(pPage);      /* doing the safe way... */
+        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PMD_SHIFT - PAGE_SHIFT)) - 1);
+        return pfn_to_page(pfn);
+    }
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map) /* As usual, RHEL 3 had pte_offset_map earlier. */
+    pEntry = pte_offset_map(&u.Middle, ulAddr);
+#else
+    pEntry = pte_offset(&u.Middle, ulAddr);
+#endif
+    if (RT_UNLIKELY(!pEntry))
+        return NULL;
+    /* Copy the PTE before unmapping it, then test the copy. */
+    u.Entry = *pEntry;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map)
+    pte_unmap(pEntry);
+#endif
+
+    if (RT_UNLIKELY(!pte_present(u.Entry)))
+        return NULL;
+    return pte_page(u.Entry);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    /* Contiguous physical allocation - just forward to the common worker. */
+    int const rc = rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS, cb, uAlignment, PhysHighest);
+    return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+    /* Non-contiguous allocation: alignment beyond PAGE_SIZE is meaningless
+       here, so pass PAGE_SIZE to the common worker. */
+    int const rc = rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PAGE_SIZE, PhysHighest);
+    return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+    PRTR0MEMOBJLNX pMemLnx;
+    dma_addr_t     PhysAddr;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * The only validation required here is that the address fits into a
+     * dma_addr_t (32/64-bit), i.e. that ioremap can be used on it later.
+     */
+    PhysAddr = Phys;
+    AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);
+
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (pMemLnx)
+    {
+        /* Note: no pages are tracked for an entered (not allocated) range. */
+        pMemLnx->Core.u.Phys.PhysBase     = PhysAddr;
+        pMemLnx->Core.u.Phys.fAllocated   = false;
+        pMemLnx->Core.u.Phys.uCachePolicy = uCachePolicy;
+        Assert(!pMemLnx->cPages);
+        *ppMem = &pMemLnx->Core;
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VINF_SUCCESS;
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VERR_NO_MEMORY;
+}
+
+/* openSUSE Leap 42.3 detection :-/ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) \
+ && defined(FAULT_FLAG_REMOTE)
+# define GET_USER_PAGES_API KERNEL_VERSION(4, 10, 0) /* no typo! */
+#else
+# define GET_USER_PAGES_API LINUX_VERSION_CODE
+#endif
+
+/**
+ * Locks down a range of user memory (RTR0MemObjLockUser worker).
+ *
+ * Pins the user pages with get_user_pages*() and sets VM_DONTCOPY and
+ * VM_LOCKED on the covering VMAs so that fork() and page-table teardown
+ * leave the locked range alone (see the big comment in the body).
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the new memory object on success.
+ * @param   R3Ptr       Page-aligned user address of the range to lock.
+ * @param   cb          Size of the range; must be a whole number of pages.
+ * @param   fAccess     RTMEM_PROT_XXX flags; RTMEM_PROT_WRITE requests
+ *                      (forced) write access to the pages.
+ * @param   R0Process   The process the address range belongs to.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    const int cPages = cb >> PAGE_SHIFT;
+    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
+    struct vm_area_struct **papVMAs;
+    PRTR0MEMOBJLNX pMemLnx;
+    int rc = VERR_NO_MEMORY;
+    int const fWrite = fAccess & RTMEM_PROT_WRITE ? 1 : 0;
+
+    /*
+     * Check for valid task and size overflows.
+     */
+    /* NOTE(review): these two early returns skip IPRT_LINUX_RESTORE_EFL_AC.
+       Harmless since nothing has touched EFLAGS yet, but inconsistent with
+       the other exit paths - confirm whether this is intentional. */
+    if (!pTask)
+        return VERR_NOT_SUPPORTED;
+    if (((size_t)cPages << PAGE_SHIFT) != cb)
+        return VERR_OUT_OF_RANGE;
+
+    /*
+     * Allocate the memory object and a temporary buffer for the VMAs.
+     */
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    if (!pMemLnx)
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+
+    papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
+    if (papVMAs)
+    {
+        down_read(&pTask->mm->mmap_sem);
+
+        /*
+         * Get user pages.
+         */
+        /* The get_user_pages* signature changed several times: the task/mm
+           pair was dropped in 4.6 (separate _remote variant added), the two
+           write/force ints became a single gup_flags in 4.9, and _remote
+           gained a 'locked' parameter in 4.10 - hence the GET_USER_PAGES_API
+           branches below. */
+#if GET_USER_PAGES_API >= KERNEL_VERSION(4, 6, 0)
+        if (R0Process == RTR0ProcHandleSelf())
+            rc = get_user_pages(R3Ptr,                  /* Where from. */
+                                cPages,                 /* How many pages. */
+# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
+                                fWrite ? FOLL_WRITE |   /* Write to memory. */
+                                         FOLL_FORCE     /* force write access. */
+                                       : 0,             /* Write to memory. */
+# else
+                                fWrite,                 /* Write to memory. */
+                                fWrite,                 /* force write access. */
+# endif
+                                &pMemLnx->apPages[0],   /* Page array. */
+                                papVMAs);               /* vmas */
+        /*
+         * Actually this should not happen at the moment as we call this
+         * function only for our own process.
+         */
+        else
+            rc = get_user_pages_remote(
+                                pTask,                  /* Task for fault accounting. */
+                                pTask->mm,              /* Whose pages. */
+                                R3Ptr,                  /* Where from. */
+                                cPages,                 /* How many pages. */
+# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
+                                fWrite ? FOLL_WRITE |   /* Write to memory. */
+                                         FOLL_FORCE     /* force write access. */
+                                       : 0,             /* Write to memory. */
+# else
+                                fWrite,                 /* Write to memory. */
+                                fWrite,                 /* force write access. */
+# endif
+                                &pMemLnx->apPages[0],   /* Page array. */
+                                papVMAs                 /* vmas */
+# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 10, 0)
+                                , NULL                  /* locked */
+# endif
+                                );
+#else /* GET_USER_PAGES_API < KERNEL_VERSION(4, 6, 0) */
+        rc = get_user_pages(pTask,                  /* Task for fault accounting. */
+                            pTask->mm,              /* Whose pages. */
+                            R3Ptr,                  /* Where from. */
+                            cPages,                 /* How many pages. */
+# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
+                            fWrite ? FOLL_WRITE |   /* Write to memory. */
+                                     FOLL_FORCE     /* force write access. */
+                                   : 0,             /* Write to memory. */
+# else
+                            fWrite,                 /* Write to memory. */
+                            fWrite,                 /* force write access. */
+# endif
+                            &pMemLnx->apPages[0],   /* Page array. */
+                            papVMAs);               /* vmas */
+#endif /* GET_USER_PAGES_API < KERNEL_VERSION(4, 6, 0) */
+        /* get_user_pages returns the number of pages actually pinned. */
+        if (rc == cPages)
+        {
+            /*
+             * Flush dcache (required?), protect against fork and _really_ pin the page
+             * table entries. get_user_pages() will protect against swapping out the
+             * pages but it will NOT protect against removing page table entries. This
+             * can be achieved with
+             *  - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
+             *    an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
+             *    Usual Linux distributions support only a limited size of locked pages
+             *    (e.g. 32KB).
+             *  - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages()
+             *    or by
+             *  - setting the VM_LOCKED flag. This is the same as doing mlock() without
+             *    a range check.
+             */
+            /** @todo The Linux fork() protection will require more work if this API
+             * is to be used for anything but locking VM pages. */
+            while (rc-- > 0)
+            {
+                flush_dcache_page(pMemLnx->apPages[rc]);
+                papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
+            }
+
+            up_read(&pTask->mm->mmap_sem);
+
+            RTMemFree(papVMAs);
+
+            pMemLnx->Core.u.Lock.R0Process = R0Process;
+            pMemLnx->cPages = cPages;
+            Assert(!pMemLnx->fMappedToRing0);
+            *ppMem = &pMemLnx->Core;
+
+            IPRT_LINUX_RESTORE_EFL_AC();
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Failed - we need to unlock any pages that we succeeded to lock.
+         */
+        while (rc-- > 0)
+        {
+            if (!PageReserved(pMemLnx->apPages[rc]))
+                SetPageDirty(pMemLnx->apPages[rc]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+            put_page(pMemLnx->apPages[rc]);
+#else
+            page_cache_release(pMemLnx->apPages[rc]);
+#endif
+        }
+
+        up_read(&pTask->mm->mmap_sem);
+
+        RTMemFree(papVMAs);
+        rc = VERR_LOCK_FAILED;
+    }
+
+    rtR0MemObjDelete(&pMemLnx->Core);
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Locks down a range of kernel memory (RTR0MemObjLockKernel worker).
+ *
+ * Only collects the backing struct page pointers; kernel pages are ASSUMED
+ * to be non-swappable and non-movable (see comment in the body), so no
+ * actual pinning takes place.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem   Where to store the new memory object on success.
+ * @param   pv      Page-aligned kernel address of the range.
+ * @param   cb      Size of the range; a whole number of pages.
+ * @param   fAccess RTMEM_PROT_XXX flags; unused here.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+    IPRT_LINUX_SAVE_EFL_AC();
+    void *pvLast = (uint8_t *)pv + cb - 1;
+    size_t const cPages = cb >> PAGE_SHIFT;
+    PRTR0MEMOBJLNX pMemLnx;
+    bool fLinearMapping;
+    int rc;
+    uint8_t *pbPage;
+    size_t iPage;
+    NOREF(fAccess);
+
+    /* NOTE(review): the second check uses 'pv + cb' (one byte past the end)
+       rather than pvLast, and relies on the GCC extension of arithmetic on
+       void pointers - confirm both are intentional. */
+    if (   !RTR0MemKernelIsValidAddr(pv)
+        || !RTR0MemKernelIsValidAddr(pv + cb))
+        return VERR_INVALID_PARAMETER;
+
+    /*
+     * The lower part of the kernel memory has a linear mapping between
+     * physical and virtual addresses. So we take a short cut here.  This is
+     * assumed to be the cleanest way to handle those addresses (and the code
+     * is well tested, though the test for determining it is not very nice).
+     * If we ever decide it isn't we can still remove it.
+     */
+#if 0
+    fLinearMapping = (unsigned long)pvLast < VMALLOC_START;
+#else
+    fLinearMapping = (unsigned long)pv     >= (unsigned long)__va(0)
+                  && (unsigned long)pvLast <  (unsigned long)high_memory;
+#endif
+
+    /*
+     * Allocate the memory object.
+     */
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
+    if (!pMemLnx)
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+
+    /*
+     * Gather the pages.
+     * We ASSUME all kernel pages are non-swappable and non-movable.
+     */
+    rc     = VINF_SUCCESS;
+    pbPage = (uint8_t *)pvLast;
+    iPage  = cPages;
+    if (!fLinearMapping)
+    {
+        /* vmalloc'd or similar range: resolve each page by walking the
+           page tables (may fail => VERR_LOCK_FAILED). */
+        while (iPage-- > 0)
+        {
+            struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
+            if (RT_UNLIKELY(!pPage))
+            {
+                rc = VERR_LOCK_FAILED;
+                break;
+            }
+            pMemLnx->apPages[iPage] = pPage;
+            pbPage -= PAGE_SIZE;
+        }
+    }
+    else
+    {
+        /* Linear mapping: virt_to_page is valid and cheap. */
+        while (iPage-- > 0)
+        {
+            pMemLnx->apPages[iPage] = virt_to_page(pbPage);
+            pbPage -= PAGE_SIZE;
+        }
+    }
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Complete the memory object and return.
+         */
+        pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+        pMemLnx->cPages = cPages;
+        Assert(!pMemLnx->fMappedToRing0);
+        *ppMem = &pMemLnx->Core;
+
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VINF_SUCCESS;
+    }
+
+    rtR0MemObjDelete(&pMemLnx->Core);
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+/**
+ * Reserves a kernel virtual address range (RTR0MemObjReserveKernel worker).
+ *
+ * Reserves the range by vmap'ing a single dummy page read-only over the
+ * whole area; the dummy page is kept in apPages[0] so it can be freed when
+ * the object is destroyed.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the new memory object on success.
+ * @param   pvFixed     Must be (void *)-1; fixed addresses are not supported.
+ * @param   cb          Size of the range; a whole number of pages.
+ * @param   uAlignment  Requested alignment; only up to PAGE_SIZE supported.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+    IPRT_LINUX_SAVE_EFL_AC();
+    const size_t cPages = cb >> PAGE_SHIFT;
+    struct page *pDummyPage;
+    struct page **papPages;
+
+    /* check for unsupported stuff. */
+    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Allocate a dummy page and create a page pointer array for vmap such that
+     * the dummy page is mapped all over the reserved area.
+     */
+    pDummyPage = alloc_page(GFP_HIGHUSER | __GFP_NOWARN);
+    if (pDummyPage)
+    {
+        papPages = RTMemAlloc(sizeof(*papPages) * cPages);
+        if (papPages)
+        {
+            void *pv;
+            size_t iPage = cPages;
+            while (iPage-- > 0)
+                papPages[iPage] = pDummyPage;
+            /* VM_MAP replaced VM_ALLOC for this purpose in newer kernels. */
+# ifdef VM_MAP
+            pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
+# else
+            pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
+# endif
+            /* The page array is only needed during vmap; free it right away. */
+            RTMemFree(papPages);
+            if (pv)
+            {
+                PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
+                if (pMemLnx)
+                {
+                    pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+                    /* Keep the dummy page so the free code releases it. */
+                    pMemLnx->cPages = 1;
+                    pMemLnx->apPages[0] = pDummyPage;
+                    *ppMem = &pMemLnx->Core;
+                    IPRT_LINUX_RESTORE_EFL_AC();
+                    return VINF_SUCCESS;
+                }
+                vunmap(pv);
+            }
+        }
+        __free_page(pDummyPage);
+    }
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VERR_NO_MEMORY;
+
+#else /* < 2.4.22 */
+    /*
+     * Could probably use ioremap here, but the caller is in a better position than us
+     * to select some safe physical memory.
+     */
+    return VERR_NOT_SUPPORTED;
+#endif
+}
+
+
+/**
+ * Reserves a user virtual address range (RTR0MemObjReserveUser worker).
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the new memory object on success.
+ * @param   R3PtrFixed  Fixed user address or (RTR3PTR)-1 for any.
+ * @param   cb          Size of the range; a whole number of pages.
+ * @param   uAlignment  Requested alignment; only up to PAGE_SIZE supported.
+ * @param   R0Process   The process to reserve the range in.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
+{
+    PRTR0MEMOBJLNX      pMemLnx;
+    void               *pv;
+    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
+
+    /*
+     * Validate the input before saving EFLAGS.AC, so that every return path
+     * below the save is matched by a restore (the original early returns
+     * skipped the restore).
+     */
+    if (!pTask)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Check that the specified alignment is supported.
+     */
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * Let rtR0MemObjLinuxDoMmap do the difficult bits.
+     */
+    pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
+    if (pv == (void *)-1)
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
+    if (!pMemLnx)
+    {
+        /* Undo the mmap before bailing out. */
+        rtR0MemObjLinuxDoMunmap(pv, cb, pTask);
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+
+    pMemLnx->Core.u.ResVirt.R0Process = R0Process;
+    *ppMem = &pMemLnx->Core;
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Maps a memory object into kernel space (RTR0MemObjMapKernel worker).
+ *
+ * Page-array backed objects are mapped with vmap; page-less physical
+ * objects (entered MMIO/physical ranges) are mapped with ioremap.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the new mapping object on success.
+ * @param   pMemToMap   The object to map.
+ * @param   pvFixed     Must be (void *)-1; fixed addresses not supported.
+ * @param   uAlignment  Requested alignment; only up to PAGE_SIZE supported.
+ * @param   fProt       RTMEM_PROT_XXX protection flags.
+ * @param   offSub      Must be 0; sub-range mapping not supported.
+ * @param   cbSub       Must be 0; sub-range mapping not supported.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap,
+                                          void *pvFixed, size_t uAlignment,
+                                          unsigned fProt, size_t offSub, size_t cbSub)
+{
+    int rc = VERR_NO_MEMORY;
+    PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
+    PRTR0MEMOBJLNX pMemLnx;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /* Fail if requested to do something we can't. */
+    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
+    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Create the IPRT memory object.
+     */
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
+    if (pMemLnx)
+    {
+        if (pMemLnxToMap->cPages)
+        {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+            /*
+             * Use vmap - 2.4.22 and later.
+             */
+            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
+# ifdef VM_MAP
+            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
+# else
+            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
+# endif
+            if (pMemLnx->Core.pv)
+            {
+                /* Remember the mapping so the delete code vunmap's it. */
+                pMemLnx->fMappedToRing0 = true;
+                rc = VINF_SUCCESS;
+            }
+            else
+                rc = VERR_MAP_FAILED;
+
+#else  /* < 2.4.22 */
+            /*
+             * Only option here is to share mappings if possible and forget about fProt.
+             */
+            if (rtR0MemObjIsRing3(pMemToMap))
+                rc = VERR_NOT_SUPPORTED;
+            else
+            {
+                rc = VINF_SUCCESS;
+                if (!pMemLnxToMap->Core.pv)
+                    rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
+                if (RT_SUCCESS(rc))
+                {
+                    Assert(pMemLnxToMap->Core.pv);
+                    pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
+                }
+            }
+#endif
+        }
+        else
+        {
+            /*
+             * MMIO / physical memory.
+             */
+            Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
+            pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
+                             ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
+                             : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
+            if (pMemLnx->Core.pv)
+            {
+                /** @todo fix protection. */
+                rc = VINF_SUCCESS;
+            }
+        }
+        if (RT_SUCCESS(rc))
+        {
+            pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+            *ppMem = &pMemLnx->Core;
+            IPRT_LINUX_RESTORE_EFL_AC();
+            return VINF_SUCCESS;
+        }
+        rtR0MemObjDelete(&pMemLnx->Core);
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+#ifdef VBOX_USE_PAE_HACK
+/**
+ * Replace the PFN of a PTE with the address of the actual page.
+ *
+ * The caller maps a reserved dummy page at the address with the desired access
+ * and flags.
+ *
+ * This hack is required for older Linux kernels which don't provide
+ * remap_pfn_range().
+ *
+ * @note Directly manipulates the pte_low/pte_high halves of the 64-bit PTE,
+ *       i.e. this assumes the x86 PAE page-table entry layout.
+ *
+ * @returns 0 on success, -ENOMEM on failure.
+ * @param   mm      The memory context.
+ * @param   ulAddr  The mapping address.
+ * @param   Phys    The physical address of the page to map.
+ */
+static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
+{
+    int rc = -ENOMEM;
+    pgd_t *pgd;
+
+    /* The page_table_lock serializes against other PTE manipulation. */
+    spin_lock(&mm->page_table_lock);
+
+    pgd = pgd_offset(mm, ulAddr);
+    if (!pgd_none(*pgd) && !pgd_bad(*pgd))
+    {
+        pmd_t *pmd = pmd_offset(pgd, ulAddr);
+        if (!pmd_none(*pmd))
+        {
+            pte_t *ptep = pte_offset_map(pmd, ulAddr);
+            if (ptep)
+            {
+                pte_t pte = *ptep;
+                /* Splice the new physical frame number into the entry while
+                   preserving the flag bits (low 12) and reserved high bits. */
+                pte.pte_high &= 0xfff00000;
+                pte.pte_high |= ((Phys >> 32) & 0x000fffff);
+                pte.pte_low  &= 0x00000fff;
+                pte.pte_low  |= (Phys & 0xfffff000);
+                set_pte(ptep, pte);
+                pte_unmap(ptep);
+                rc = 0;
+            }
+        }
+    }
+
+    spin_unlock(&mm->page_table_lock);
+    return rc;
+}
+#endif /* VBOX_USE_PAE_HACK */
+
+
+/**
+ * Maps a memory object into a user process (RTR0MemObjMapUser worker).
+ *
+ * Reserves a user address range via rtR0MemObjLinuxDoMmap and then maps the
+ * backing pages into it page by page ("generic, paranoid and not very
+ * efficient", as the inline comment says).  On affected kernels it also
+ * pushes the NUMA balancing scan far into the future as a workaround.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the new mapping object on success.
+ * @param   pMemToMap   The object to map.
+ * @param   R3PtrFixed  Fixed user address or (RTR3PTR)-1 for any.
+ * @param   uAlignment  Requested alignment; only up to PAGE_SIZE supported.
+ * @param   fProt       RTMEM_PROT_XXX protection flags.
+ * @param   R0Process   The target process.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed,
+                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
+{
+    struct task_struct *pTask        = rtR0ProcessToLinuxTask(R0Process);
+    PRTR0MEMOBJLNX      pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
+    int                 rc           = VERR_NO_MEMORY;
+    PRTR0MEMOBJLNX      pMemLnx;
+#ifdef VBOX_USE_PAE_HACK
+    struct page        *pDummyPage;
+    RTHCPHYS            DummyPhys;
+#endif
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * Check for restrictions.
+     */
+    if (!pTask)
+        return VERR_NOT_SUPPORTED;
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+#ifdef VBOX_USE_PAE_HACK
+    /*
+     * Allocate a dummy page for use when mapping the memory.
+     */
+    pDummyPage = alloc_page(GFP_USER | __GFP_NOWARN);
+    if (!pDummyPage)
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_NO_MEMORY;
+    }
+    SetPageReserved(pDummyPage);
+    DummyPhys = page_to_phys(pDummyPage);
+#endif
+
+    /*
+     * Create the IPRT memory object.
+     */
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
+    if (pMemLnx)
+    {
+        /*
+         * Allocate user space mapping.
+         */
+        void *pv;
+        pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
+        if (pv != (void *)-1)
+        {
+            /*
+             * Map page by page into the mmap area.
+             * This is generic, paranoid and not very efficient.
+             */
+            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, false /* user */);
+            unsigned long ulAddrCur = (unsigned long)pv;
+            const size_t cPages = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
+            size_t iPage;
+
+            down_write(&pTask->mm->mmap_sem);
+
+            rc = VINF_SUCCESS;
+            if (pMemLnxToMap->cPages)
+            {
+                /* Page-array backed object: map each struct page. */
+                for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
+                {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+                    RTHCPHYS Phys = page_to_phys(pMemLnxToMap->apPages[iPage]);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+                    struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
+                    AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
+                    /* remap_page_range() limitation on x86 */
+                    AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
+#endif
+
+#if   defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+                    rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
+                    /* These flags help making 100% sure some bad stuff won't happen (swap, core, ++).
+                     * See remap_pfn_range() in mm/memory.c */
+#if    LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+                    vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+#else
+                    vma->vm_flags |= VM_RESERVED;
+#endif
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+                    rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
+#elif defined(VBOX_USE_PAE_HACK)
+                    /* Map the dummy page first, then patch the PTE to point
+                       at the real page (no remap_pfn_range on this kernel). */
+                    rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
+                    if (!rc)
+                        rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+                    rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
+#else /* 2.4 */
+                    rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
+#endif
+                    if (rc)
+                    {
+                        rc = VERR_NO_MEMORY;
+                        break;
+                    }
+                }
+            }
+            else
+            {
+                /* Page-less object: derive a physical base address instead. */
+                RTHCPHYS Phys;
+                if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS)
+                    Phys = pMemLnxToMap->Core.u.Phys.PhysBase;
+                else if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_CONT)
+                    Phys = pMemLnxToMap->Core.u.Cont.Phys;
+                else
+                {
+                    AssertMsgFailed(("%d\n", pMemLnxToMap->Core.enmType));
+                    Phys = NIL_RTHCPHYS;
+                }
+                if (Phys != NIL_RTHCPHYS)
+                {
+                    for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
+                    {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+                        struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
+                        AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
+                        /* remap_page_range() limitation on x86 */
+                        AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
+#endif
+
+#if   LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+                        rc = remap_pfn_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
+#elif defined(VBOX_USE_PAE_HACK)
+                        rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
+                        if (!rc)
+                            rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+                        rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
+#else /* 2.4 */
+                        rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
+#endif
+                        if (rc)
+                        {
+                            rc = VERR_NO_MEMORY;
+                            break;
+                        }
+                    }
+                }
+            }
+
+#ifdef CONFIG_NUMA_BALANCING
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+#  ifdef RHEL_RELEASE_CODE
+#   if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)
+#    define VBOX_NUMA_HACK_OLD
+#   endif
+#  endif
+# endif
+            if (RT_SUCCESS(rc))
+            {
+                /** @todo Ugly hack! But right now we have no other means to
+                 *        disable automatic NUMA page balancing. */
+# ifdef RT_OS_X86
+#  ifdef VBOX_NUMA_HACK_OLD
+                pTask->mm->numa_next_reset = jiffies + 0x7fffffffUL;
+#  endif
+                pTask->mm->numa_next_scan  = jiffies + 0x7fffffffUL;
+# else
+#  ifdef VBOX_NUMA_HACK_OLD
+                pTask->mm->numa_next_reset = jiffies + 0x7fffffffffffffffUL;
+#  endif
+                pTask->mm->numa_next_scan  = jiffies + 0x7fffffffffffffffUL;
+# endif
+            }
+#endif /* CONFIG_NUMA_BALANCING */
+
+            up_write(&pTask->mm->mmap_sem);
+
+            if (RT_SUCCESS(rc))
+            {
+#ifdef VBOX_USE_PAE_HACK
+                __free_page(pDummyPage);
+#endif
+                pMemLnx->Core.pv = pv;
+                pMemLnx->Core.u.Mapping.R0Process = R0Process;
+                *ppMem = &pMemLnx->Core;
+                IPRT_LINUX_RESTORE_EFL_AC();
+                return VINF_SUCCESS;
+            }
+
+            /*
+             * Bail out.
+             */
+            rtR0MemObjLinuxDoMunmap(pv, pMemLnxToMap->Core.cb, pTask);
+        }
+        rtR0MemObjDelete(&pMemLnx->Core);
+    }
+#ifdef VBOX_USE_PAE_HACK
+    __free_page(pDummyPage);
+#endif
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+    /* Changing the protection of an existing mapping is not implemented
+       in this Linux backend. */
+    NOREF(pMem); NOREF(offSub); NOREF(cbSub); NOREF(fProt);
+    return VERR_NOT_SUPPORTED;
+}
+
+
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJLNX pThis = (PRTR0MEMOBJLNX)pMem;
+
+    /* Objects backed by a page array can answer directly. */
+    if (pThis->cPages)
+        return page_to_phys(pThis->apPages[iPage]);
+
+    switch (pThis->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_CONT:
+            return pThis->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
+
+        case RTR0MEMOBJTYPE_PHYS:
+            return pThis->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
+
+        /* a mapping defers the question to whatever it maps */
+        case RTR0MEMOBJTYPE_MAPPING:
+            return rtR0MemObjNativeGetPagePhysAddr(pThis->Core.uRel.Child.pParent, iPage);
+
+        /* these always carry a page array (cPages > 0), so landing here
+           means something is off */
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_LOCK:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        case RTR0MEMOBJTYPE_PAGE:
+        default:
+            AssertMsgFailed(("%d\n", pThis->Core.enmType));
+            /* fall thru */
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return NIL_RTHCPHYS;
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/linux/memuserkernel-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/memuserkernel-r0drv-linux.c
new file mode 100644
index 00000000..1de73e58
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/memuserkernel-r0drv-linux.c
@@ -0,0 +1,181 @@
+/* $Id: memuserkernel-r0drv-linux.c $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/mem.h>
+#include <iprt/errcore.h>
+
+
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+    /* copy_from_user returns the number of bytes it could NOT copy;
+       zero therefore means complete success. */
+    int rc;
+    IPRT_LINUX_SAVE_EFL_AC();
+    rc = copy_from_user(pvDst, (void *)R3PtrSrc, cb) == 0 ? VINF_SUCCESS : VERR_ACCESS_DENIED;
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemUserCopyFrom);
+
+
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+    /* copy_to_user returns the number of bytes it could NOT copy;
+       zero therefore means complete success. */
+    int rc;
+    IPRT_LINUX_SAVE_EFL_AC();
+    rc = copy_to_user((void *)R3PtrDst, pvSrc, cb) == 0 ? VINF_SUCCESS : VERR_ACCESS_DENIED;
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemUserCopyTo);
+
+
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
+    bool fRc;
+    IPRT_LINUX_SAVE_EFL_AC();
+    /* access_ok() lost its first ('type') parameter in Linux 5.0. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+    fRc = access_ok((void *)R3Ptr, 1);
+#else
+    fRc = access_ok(VERIFY_READ, (void *)R3Ptr, 1);
+#endif
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return fRc;
+}
+RT_EXPORT_SYMBOL(RTR0MemUserIsValidAddr);
+
+
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+    /* Couldn't find a straight forward way of doing this... */
+#if defined(RT_ARCH_X86) && defined(CONFIG_X86_HIGH_ENTRY)
+    return true; /* ?? */
+#elif defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+    /* On x86/amd64 kernel addresses are everything at or above PAGE_OFFSET. */
+    return (uintptr_t)pv >= PAGE_OFFSET;
+#else
+# error "PORT ME"
+    /* NOTE(review): the code below is unreachable behind the #error above;
+       it is only a sketch for future ports (kernel == !access_ok). */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+    return !access_ok(pv, 1);
+#else
+    return !access_ok(VERIFY_READ, pv, 1);
+#endif /* LINUX_VERSION_CODE */
+#endif
+}
+RT_EXPORT_SYMBOL(RTR0MemKernelIsValidAddr);
+
+
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+    /* Kernel and user contexts are considered distinct everywhere except on
+       x86 kernels built with CONFIG_X86_HIGH_ENTRY (uncertain - see '??'). */
+#if defined(RT_ARCH_X86) && defined(CONFIG_X86_HIGH_ENTRY) /* ?? */
+    return false;
+#else
+    return true;
+#endif
+}
+RT_EXPORT_SYMBOL(RTR0MemAreKrnlAndUsrDifferent);
+
+
+/**
+ * Worker for RTR0MemKernelCopyFrom and RTR0MemKernelCopyTo.
+ *
+ * Treats both source and destination as unsafe buffers: the copy is a
+ * "rep movsb" with an exception-table fixup, so a faulting access yields
+ * VERR_ACCESS_DENIED instead of an oops.
+ *
+ * @returns VINF_SUCCESS, VERR_ACCESS_DENIED on fault, or
+ *          VERR_NOT_SUPPORTED on pre-2.5.55 kernels.
+ * @param   pvDst   The destination address.
+ * @param   pvSrc   The source address.
+ * @param   cb      Number of bytes to copy.
+ */
+static int rtR0MemKernelCopyLnxWorker(void *pvDst, void const *pvSrc, size_t cb)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 55)
+/* _ASM_EXTABLE was introduced in 2.6.25 from what I can tell. Using #ifndef
+   here since it has to be a macro and you never know what someone might have
+   backported to an earlier kernel release. */
+# ifndef _ASM_EXTABLE
+#  if ARCH_BITS == 32
+#   define _ASM_EXTABLE(a_Instr, a_Resume) \
+    ".section __ex_table,\"a\"\n" \
+    ".balign 4\n" \
+    ".long " #a_Instr "\n" \
+    ".long " #a_Resume "\n" \
+    ".previous\n"
+#  else
+#   define _ASM_EXTABLE(a_Instr, a_Resume) \
+    ".section __ex_table,\"a\"\n" \
+    ".balign 8\n" \
+    ".quad " #a_Instr "\n" \
+    ".quad " #a_Resume "\n" \
+    ".previous\n"
+#  endif
+# endif /* !_ASM_EXTABLE */
+    int rc;
+    IPRT_LINUX_SAVE_EFL_AC(); /* paranoia */
+    if (!cb)
+    {
+        /* Restore the saved flags on this trivial path too, so every return
+           is paired with the save above (the original skipped it here). */
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VINF_SUCCESS;
+    }
+
+    /* rep movsb copies ECX/RCX bytes from DS:[ESI] to ES:[EDI]; a fault in
+       the string move jumps to label 3, which loads VERR_ACCESS_DENIED into
+       rc and resumes at label 2. */
+    __asm__ __volatile__ ("cld\n"
+                          "1:\n\t"
+                          "rep; movsb\n"
+                          "2:\n\t"
+                          ".section .fixup,\"ax\"\n"
+                          "3:\n\t"
+                          "movl %4, %0\n\t"
+                          "jmp 2b\n\t"
+                          ".previous\n"
+                          _ASM_EXTABLE(1b, 3b)
+                          : "=r" (rc),
+                            "=D" (pvDst),
+                            "=S" (pvSrc),
+                            "=c" (cb)
+                          : "i" (VERR_ACCESS_DENIED),
+                            "0" (VINF_SUCCESS),
+                            "1" (pvDst),
+                            "2" (pvSrc),
+                            "3" (cb)
+                          : "memory");
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return rc;
+#else
+    return VERR_NOT_SUPPORTED;
+#endif
+}
+
+
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+    /* Both buffers are treated as unsafe by the common worker. */
+    int const rc = rtR0MemKernelCopyLnxWorker(pvDst, pvSrc, cb);
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemKernelCopyFrom);
+
+
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+    /* Both buffers are treated as unsafe by the common worker. */
+    int const rc = rtR0MemKernelCopyLnxWorker(pvDst, pvSrc, cb);
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemKernelCopyTo);
+
diff --git a/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c
new file mode 100644
index 00000000..ca480683
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c
@@ -0,0 +1,626 @@
+/* $Id: mp-r0drv-linux.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/thread.h>
+#include "r0drv/mp-r0drv.h"
+
+#ifdef nr_cpumask_bits
+# define VBOX_NR_CPUMASK_BITS nr_cpumask_bits
+#else
+# define VBOX_NR_CPUMASK_BITS NR_CPUS
+#endif
+
+
+/** Gets the ID of the current CPU (the native Linux processor number). */
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+ return smp_processor_id();
+}
+RT_EXPORT_SYMBOL(RTMpCpuId);
+
+
+/** Gets the CPU set index of the current CPU; identical to the processor
+ * number on Linux. */
+RTDECL(int) RTMpCurSetIndex(void)
+{
+ return smp_processor_id();
+}
+RT_EXPORT_SYMBOL(RTMpCurSetIndex);
+
+
+/** Gets the current CPU's set index and stores its ID in @a pidCpu.
+ * On Linux set index and CPU ID are the same value, hence the combined
+ * assign-and-return. */
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+ return *pidCpu = smp_processor_id();
+}
+RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
+
+
+/** Converts a CPU ID to a set index; returns -1 when the ID exceeds either
+ * the IPRT set capacity or the kernel cpumask size. */
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+ return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
+}
+RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
+
+
+/** Converts a set index to a CPU ID; NIL_RTCPUID when out of range.
+ * NOTE(review): a negative iCpu would convert to a huge RTCPUID here rather
+ * than NIL — presumably callers never pass one; confirm. */
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+ return iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
+}
+RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
+
+
+/** Returns the highest possible CPU ID, i.e. the last bit of the kernel
+ * cpumask. (The trailing "//???" is the original author's own doubt.) */
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+ return VBOX_NR_CPUMASK_BITS - 1; //???
+}
+RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
+
+
+/** Checks whether @a idCpu can ever exist on this system.
+ * Uses cpu_possible() where available; on pre-2.5.29 SMP kernels falls
+ * back on comparing against smp_num_cpus, and on UP kernels only the
+ * current CPU's ID is possible. */
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+#if defined(CONFIG_SMP)
+ if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
+ return false;
+
+# if defined(cpu_possible)
+ return cpu_possible(idCpu);
+# else /* < 2.5.29 */
+ return idCpu < (RTCPUID)smp_num_cpus;
+# endif
+#else
+ return idCpu == RTMpCpuId();
+#endif
+}
+RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
+
+
+/** Fills @a pSet with all possible CPUs by probing every ID from the
+ * maximum down to 0 with RTMpIsCpuPossible().
+ * @returns pSet. */
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+ RTCPUID idCpu;
+
+ RTCpuSetEmpty(pSet);
+ idCpu = RTMpGetMaxCpuId();
+ do
+ {
+ if (RTMpIsCpuPossible(idCpu))
+ RTCpuSetAdd(pSet, idCpu);
+ } while (idCpu-- > 0);
+ return pSet;
+}
+RT_EXPORT_SYMBOL(RTMpGetSet);
+
+
+/** Returns the number of CPUs in the system, picking the best facility the
+ * kernel offers: present count (hotplug-aware), possible count, the 2.4-era
+ * smp_num_cpus, or counting the possible set by hand. Always 1 on UP. */
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+#ifdef CONFIG_SMP
+# if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
+ return num_present_cpus();
+# elif defined(num_possible_cpus)
+ return num_possible_cpus();
+# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+ return smp_num_cpus;
+# else
+ RTCPUSET Set;
+ RTMpGetSet(&Set);
+ return RTCpuSetCount(&Set);
+# endif
+#else
+ return 1;
+#endif
+}
+RT_EXPORT_SYMBOL(RTMpGetCount);
+
+
+/** Checks whether @a idCpu is currently online. Uses cpu_online() where
+ * available; on 2.4 kernels tests the cpu_online_map bitmask directly.
+ * On UP kernels only the current CPU is online. */
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+#ifdef CONFIG_SMP
+ if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
+ return false;
+# ifdef cpu_online
+ return cpu_online(idCpu);
+# else /* 2.4: */
+ return cpu_online_map & RT_BIT_64(idCpu);
+# endif
+#else
+ return idCpu == RTMpCpuId();
+#endif
+}
+RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
+
+
+/** Fills @a pSet with all currently online CPUs by probing every ID from
+ * the maximum down to 0; on UP kernels the set contains just this CPU.
+ * @returns pSet.
+ * @note The result is a snapshot — CPUs may go on/offline concurrently. */
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+#ifdef CONFIG_SMP
+ RTCPUID idCpu;
+
+ RTCpuSetEmpty(pSet);
+ idCpu = RTMpGetMaxCpuId();
+ do
+ {
+ if (RTMpIsCpuOnline(idCpu))
+ RTCpuSetAdd(pSet, idCpu);
+ } while (idCpu-- > 0);
+#else
+ RTCpuSetEmpty(pSet);
+ RTCpuSetAdd(pSet, RTMpCpuId());
+#endif
+ return pSet;
+}
+RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
+
+
+/** Returns the number of online CPUs, preferring the kernel's
+ * num_online_cpus() and falling back on counting the online set. */
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+#ifdef CONFIG_SMP
+# if defined(num_online_cpus)
+ return num_online_cpus();
+# else
+ RTCPUSET Set;
+ RTMpGetOnlineSet(&Set);
+ return RTCpuSetCount(&Set);
+# endif
+#else
+ return 1;
+#endif
+}
+RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
+
+
+/** Stub: always reports no pending per-CPU work on Linux. */
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+ /** @todo (not used on non-Windows platforms yet). */
+ return false;
+}
+RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
+
+
+/**
+ * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
+ *
+ * Bumps the hit counter *before* invoking the worker, then calls the
+ * worker with the current CPU's ID.
+ *
+ * @param pvInfo Pointer to the RTMPARGS package.
+ */
+static void rtmpLinuxWrapper(void *pvInfo)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
+ ASMAtomicIncU32(&pArgs->cHits);
+ pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
+}
+
+#ifdef CONFIG_SMP
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+/**
+ * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
+ * increment after calling the worker.
+ *
+ * Used by RTMpOnPair, where the post-increment lets the caller wait until
+ * the worker has actually *completed* on the other CPU (cHits == 2), not
+ * merely started.
+ *
+ * @param pvInfo Pointer to the RTMPARGS package.
+ */
+static void rtmpLinuxWrapperPostInc(void *pvInfo)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
+ pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+}
+# endif
+
+
+/**
+ * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
+ *
+ * Runs the worker only if this CPU is still in pArgs->pWorkerSet and
+ * removes it afterwards, so RTMpOnAll can spin until the set is empty.
+ *
+ * @param pvInfo Pointer to the RTMPARGS package.
+ */
+static void rtmpLinuxAllWrapper(void *pvInfo)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
+ PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
+ RTCPUID idCpu = RTMpCpuId();
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+ if (RTCpuSetIsMember(pWorkerSet, idCpu))
+ {
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ RTCpuSetDel(pWorkerSet, idCpu);
+ }
+}
+
+#endif /* CONFIG_SMP */
+
+/** Executes @a pfnWorker on every online CPU, including the current one.
+ *
+ * Fires the worker asynchronously on all other CPUs (wait=0), runs it
+ * locally, then spins until every CPU has removed itself from the worker
+ * set — re-snapshotting the online set periodically so a CPU that went
+ * offline mid-call cannot hang the wait loop.
+ *
+ * @returns VINF_SUCCESS (always).
+ * @param pfnWorker The worker callback.
+ * @param pvUser1 First user argument passed to the worker.
+ * @param pvUser2 Second user argument passed to the worker.
+ */
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ RTMPARGS Args;
+ RTCPUSET OnlineSet;
+ RTCPUID idCpu;
+#ifdef CONFIG_SMP
+ uint32_t cLoops;
+#endif
+
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = NIL_RTCPUID;
+ Args.cHits = 0;
+
+ RTThreadPreemptDisable(&PreemptState);
+ RTMpGetOnlineSet(&OnlineSet);
+ Args.pWorkerSet = &OnlineSet;
+ idCpu = RTMpCpuId();
+
+#ifdef CONFIG_SMP
+ if (RTCpuSetCount(&OnlineSet) > 1)
+ {
+ /* Fire the function on all other CPUs without waiting for completion. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
+# else
+ int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
+# endif
+ Assert(!rc); NOREF(rc);
+ }
+#endif
+
+ /* Fire the function on this CPU. */
+ Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
+ RTCpuSetDel(Args.pWorkerSet, idCpu);
+
+#ifdef CONFIG_SMP
+ /* Wait for all of them finish. */
+ cLoops = 64000;
+ while (!RTCpuSetIsEmpty(Args.pWorkerSet))
+ {
+ /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
+ if (!cLoops--)
+ {
+ RTCPUSET OnlineSetNow;
+ RTMpGetOnlineSet(&OnlineSetNow);
+ RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);
+
+ cLoops = 64000;
+ }
+
+ ASMNopPause();
+ }
+#endif
+
+ RTThreadPreemptRestore(&PreemptState);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTMpOnAll);
+
+
+/** Executes @a pfnWorker on every online CPU except the current one,
+ * waiting for all of them to finish (wait=1). On UP kernels this is a
+ * no-op that still reports success.
+ * @returns VINF_SUCCESS (always). */
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+#ifdef CONFIG_SMP
+ IPRT_LINUX_SAVE_EFL_AC();
+ int rc;
+ RTMPARGS Args;
+
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = NIL_RTCPUID;
+ Args.cHits = 0;
+
+ /* Preemption must be off so we stay on this CPU and are excluded from
+ the broadcast ourselves. */
+ RTThreadPreemptDisable(&PreemptState);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
+# else /* older kernels */
+ rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
+# endif /* older kernels */
+ RTThreadPreemptRestore(&PreemptState);
+
+ Assert(rc == 0); NOREF(rc);
+ IPRT_LINUX_RESTORE_EFL_AC();
+#else
+ RT_NOREF(pfnWorker, pvUser1, pvUser2);
+#endif
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTMpOnOthers);
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) && defined(CONFIG_SMP)
+/**
+ * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
+ * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
+ *
+ * Broadcast to all CPUs; only the two targets (idCpu/idCpu2 in the args
+ * package) run the worker and count a hit, the rest return immediately.
+ *
+ * @param pvInfo Pointer to the RTMPARGS package.
+ */
+static void rtMpLinuxOnPairWrapper(void *pvInfo)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
+ RTCPUID idCpu = RTMpCpuId();
+
+ if ( idCpu == pArgs->idCpu
+ || idCpu == pArgs->idCpu2)
+ {
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+ }
+}
+#endif
+
+
+/** Executes @a pfnWorker on the two given CPUs, which must both be online.
+ *
+ * @returns VINF_SUCCESS when both CPUs ran the worker,
+ *          VERR_NOT_ALL_CPUS_SHOWED / VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND
+ *          on partial or no execution, VERR_CPU_IPE_1 on an impossible hit
+ *          count.
+ * @param idCpu1 First target CPU (must differ from idCpu2).
+ * @param idCpu2 Second target CPU.
+ * @param fFlags RTMPON_F_XXX flags.
+ * @param pfnWorker The worker callback.
+ * @param pvUser1 First user argument.
+ * @param pvUser2 Second user argument.
+ */
+RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+#ifdef CONFIG_SMP
+ IPRT_LINUX_SAVE_EFL_AC();
+ int rc;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+
+ AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
+ /* NOTE(review): this rejects flags *inside* the valid mask; the usual
+ idiom for "no unknown flags" is !(fFlags & ~RTMPON_F_VALID_MASK).
+ As written, even RTMPON_F_CONCURRENT_EXEC would be refused — confirm
+ against the RTMPON_F_XXX definitions in iprt/mp.h. */
+ AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
+
+ /*
+ * Check that both CPUs are online before doing the broadcast call.
+ */
+ RTThreadPreemptDisable(&PreemptState);
+ if ( RTMpIsCpuOnline(idCpu1)
+ && RTMpIsCpuOnline(idCpu2))
+ {
+ /*
+ * Use the smp_call_function variant taking a cpu mask where available,
+ * falling back on broadcast with filter. Slight snag if one of the
+ * CPUs is the one we're running on, we must do the call and the post
+ * call wait ourselves.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
+ cpumask_var_t DstCpuMask;
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ cpumask_t DstCpuMask;
+# endif
+ RTCPUID idCpuSelf = RTMpCpuId();
+ bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
+ RTMPARGS Args;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu1;
+ Args.idCpu2 = idCpu2;
+ Args.cHits = 0;
+
+ /* NOTE(review): GFP_KERNEL may sleep, but preemption is disabled
+ here; GFP_ATOMIC would be the safe choice. Also, the
+ VERR_NO_MEMORY returns below leave preemption disabled and skip
+ IPRT_LINUX_RESTORE_EFL_AC() — looks like a genuine leak of the
+ disabled-preemption state; confirm and fix upstream. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
+ if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
+ return VERR_NO_MEMORY;
+ cpumask_set_cpu(idCpu1, DstCpuMask);
+ cpumask_set_cpu(idCpu2, DstCpuMask);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
+ return VERR_NO_MEMORY;
+ cpumask_clear(DstCpuMask);
+ cpumask_set_cpu(idCpu1, DstCpuMask);
+ cpumask_set_cpu(idCpu2, DstCpuMask);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ cpus_clear(DstCpuMask);
+ cpu_set(idCpu1, DstCpuMask);
+ cpu_set(idCpu2, DstCpuMask);
+# endif
+
+ /* Only wait in the call when we are not one of the targets; otherwise
+ we must run the worker ourselves and poll cHits below. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ smp_call_function_many(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
+ rc = 0;
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
+# else /* older kernels */
+ rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
+# endif /* older kernels */
+ Assert(rc == 0);
+
+ /* Call ourselves if necessary and wait for the other party to be done. */
+ if (fCallSelf)
+ {
+ uint32_t cLoops = 0;
+ rtmpLinuxWrapper(&Args);
+ while (ASMAtomicReadU32(&Args.cHits) < 2)
+ {
+ /* Bail out if the other CPU went offline while we were waiting. */
+ if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
+ break;
+ cLoops++;
+ ASMNopPause();
+ }
+ }
+
+ Assert(Args.cHits <= 2);
+ if (Args.cHits == 2)
+ rc = VINF_SUCCESS;
+ else if (Args.cHits == 1)
+ rc = VERR_NOT_ALL_CPUS_SHOWED;
+ else if (Args.cHits == 0)
+ rc = VERR_CPU_OFFLINE;
+ else
+ rc = VERR_CPU_IPE_1;
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ free_cpumask_var(DstCpuMask);
+# endif
+ }
+ /*
+ * A CPU must be present to be considered just offline.
+ */
+ else if ( RTMpIsCpuPresent(idCpu1)
+ && RTMpIsCpuPresent(idCpu2))
+ rc = VERR_CPU_OFFLINE;
+ else
+ rc = VERR_CPU_NOT_FOUND;
+ RTThreadPreemptRestore(&PreemptState);;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return rc;
+
+#else /* !CONFIG_SMP */
+ RT_NOREF(idCpu1, idCpu2, fFlags, pfnWorker, pvUser1, pvUser2);
+ return VERR_CPU_NOT_FOUND;
+#endif /* !CONFIG_SMP */
+}
+RT_EXPORT_SYMBOL(RTMpOnPair);
+
+
+/** Reports that RTMpOnPair can run the worker on both CPUs concurrently
+ * on Linux (the IPI-based implementations above do exactly that). */
+RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
+{
+ return true;
+}
+RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
+/**
+ * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
+ * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
+ *
+ * Broadcast to all CPUs; only the one matching pArgs->idCpu runs the
+ * worker and counts a hit.
+ *
+ * @param pvInfo Pointer to the RTMPARGS package.
+ */
+static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
+ RTCPUID idCpu = RTMpCpuId();
+
+ if (idCpu == pArgs->idCpu)
+ {
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits);
+ }
+}
+#endif
+
+
+/** Executes @a pfnWorker on the given CPU, waiting for completion.
+ *
+ * Runs the worker directly when the target is the current CPU, otherwise
+ * via a synchronous cross-call.
+ *
+ * @returns VINF_SUCCESS, VERR_CPU_NOT_FOUND if @a idCpu is not a possible
+ *          CPU, or VERR_CPU_OFFLINE if it is offline (or went offline
+ *          before the worker could run, i.e. cHits stayed 0).
+ */
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ int rc;
+ RTMPARGS Args;
+
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu;
+ Args.cHits = 0;
+
+ /* Early out before preemption is disabled; harmless w.r.t. the saved
+ EFLAGS.AC since nothing has changed it yet. */
+ if (!RTMpIsCpuPossible(idCpu))
+ return VERR_CPU_NOT_FOUND;
+
+ RTThreadPreemptDisable(&PreemptState);
+ if (idCpu != RTMpCpuId())
+ {
+#ifdef CONFIG_SMP
+ if (RTMpIsCpuOnline(idCpu))
+ {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+ rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
+# else /* older kernels */
+ rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
+# endif /* older kernels */
+ Assert(rc == 0);
+ rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
+ }
+ else
+#endif /* CONFIG_SMP */
+ rc = VERR_CPU_OFFLINE;
+ }
+ else
+ {
+ rtmpLinuxWrapper(&Args);
+ rc = VINF_SUCCESS;
+ }
+ RTThreadPreemptRestore(&PreemptState);;
+
+ /* NOTE(review): NOREF(rc) immediately before returning rc is
+ contradictory leftover — rc *is* used; harmless but confusing. */
+ NOREF(rc);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return rc;
+}
+RT_EXPORT_SYMBOL(RTMpOnSpecific);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
+/**
+ * Dummy callback used by RTMpPokeCpu.
+ *
+ * The point of the cross-call is the IPI itself (forcing the target CPU
+ * out of whatever it is doing); the callback has nothing to do.
+ *
+ * @param pvInfo Ignored.
+ */
+static void rtmpLinuxPokeCpuCallback(void *pvInfo)
+{
+ NOREF(pvInfo);
+}
+#endif
+
+
+/** Pokes the given CPU with an asynchronous (wait=0) no-op cross-call,
+ * i.e. sends it an IPI to kick it out of whatever it is executing.
+ *
+ * @returns VINF_SUCCESS, VERR_CPU_OFFLINE, VERR_CPU_NOT_FOUND, or
+ *          VERR_NOT_SUPPORTED on pre-2.6.19 kernels (no single-CPU call).
+ */
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+ IPRT_LINUX_SAVE_EFL_AC();
+ int rc;
+ if (RTMpIsCpuPossible(idCpu))
+ {
+ if (RTMpIsCpuOnline(idCpu))
+ {
+# ifdef CONFIG_SMP
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+ rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
+# else /* older kernels */
+# error oops
+# endif /* older kernels */
+ Assert(rc == 0);
+# endif /* CONFIG_SMP */
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_CPU_OFFLINE;
+ }
+ else
+ rc = VERR_CPU_NOT_FOUND;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return rc;
+
+#else /* older kernels */
+ /* no unicast here? */
+ return VERR_NOT_SUPPORTED;
+#endif /* older kernels */
+}
+RT_EXPORT_SYMBOL(RTMpPokeCpu);
+
+
+/** Reports that RTMpOnAll runs the worker concurrently on all CPUs
+ * (see the async smp_call_function + local call in RTMpOnAll above). */
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+ return true;
+}
+RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
+
diff --git a/src/VBox/Runtime/r0drv/linux/mpnotification-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/mpnotification-r0drv-linux.c
new file mode 100644
index 00000000..cec82d7a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/mpnotification-r0drv-linux.c
@@ -0,0 +1,248 @@
+/* $Id: mpnotification-r0drv-linux.c $ */
+/** @file
+ * IPRT - Multiprocessor Event Notifications, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/errcore.h>
+#include <iprt/cpuset.h>
+#include <iprt/thread.h>
+#include "r0drv/mp-r0drv.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+
+/** The dynamic cpuhp state slot allocated in rtR0MpNotificationNativeInit;
+ * needed to remove the callbacks again at termination. */
+static enum cpuhp_state g_rtR0MpOnline;
+
+/*
+ * Linux 4.10 completely removed CPU notifiers. So let's switch to CPU hotplug
+ * notification.
+ */
+
+/** cpuhp "online" callback: forwards the event to the generic IPRT
+ * notification machinery. Always reports success to the hotplug core. */
+static int rtR0MpNotificationLinuxOnline(unsigned int cpu)
+{
+ RTCPUID idCpu = RTMpCpuIdFromSetIndex(cpu);
+ rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
+ return 0;
+}
+
+/** cpuhp "offline" (teardown) callback: forwards the event to the generic
+ * IPRT notification machinery. Always reports success. */
+static int rtR0MpNotificationLinuxOffline(unsigned int cpu)
+{
+ RTCPUID idCpu = RTMpCpuIdFromSetIndex(cpu);
+ rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
+ return 0;
+}
+
+/** Registers the cpuhp callbacks in a dynamic state slot without invoking
+ * them for already-online CPUs (_nocalls variant).
+ * @returns VINF_SUCCESS or an IPRT error converted from the errno value. */
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+ int rc;
+ IPRT_LINUX_SAVE_EFL_AC();
+ rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "vboxdrv:online",
+ rtR0MpNotificationLinuxOnline, rtR0MpNotificationLinuxOffline);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ /*
+ * cpuhp_setup_state_nocalls() returns a positive state number for
+ * CPUHP_AP_ONLINE_DYN or -ENOSPC if there is no free slot available
+ * (see cpuhp_reserve_state / definition of CPUHP_AP_ONLINE_DYN).
+ */
+ AssertMsgReturn(rc > 0, ("%d\n", rc), RTErrConvertFromErrno(rc));
+ g_rtR0MpOnline = rc;
+ return VINF_SUCCESS;
+}
+
+
+/** Unregisters the cpuhp callbacks installed by the init function. */
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ cpuhp_remove_state_nocalls(g_rtR0MpOnline);
+ IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 71) && defined(CONFIG_SMP)
+
+static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu);
+
+/**
+ * The notifier block we use for registering the callback.
+ */
+static struct notifier_block g_NotifierBlock =
+{
+ .notifier_call = rtMpNotificationLinuxCallback,
+ .next = NULL,
+ .priority = 0
+};
+
+# ifdef CPU_DOWN_FAILED
+/**
+ * The set of CPUs we've seen going offline recently.
+ *
+ * Used to filter CPU_DOWN_FAILED events: only CPUs we previously reported
+ * as going offline get a compensating ONLINE notification.
+ */
+static RTCPUSET g_MpPendingOfflineSet;
+# endif
+
+
+/**
+ * The native callback.
+ *
+ * First switch decides whether the event is interesting and keeps the
+ * pending-offline bookkeeping up to date; second switch maps the native
+ * event to the IPRT RTMPEVENT_ONLINE/OFFLINE callbacks.
+ *
+ * @returns NOTIFY_DONE.
+ * @param pNotifierBlock Pointer to g_NotifierBlock.
+ * @param ulNativeEvent The native event.
+ * @param pvCpu The cpu id cast into a pointer value.
+ *
+ * @remarks This can fire with preemption enabled and on any CPU.
+ */
+static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu)
+{
+ bool fProcessEvent = false;
+ RTCPUID idCpu = (uintptr_t)pvCpu;
+ NOREF(pNotifierBlock);
+
+ /*
+ * Note that redhat/CentOS ported _some_ of the FROZEN macros
+ * back to their 2.6.18-92.1.10.el5 kernel but actually don't
+ * use them. Thus we have to test for both CPU_TASKS_FROZEN and
+ * the individual event variants.
+ */
+ switch (ulNativeEvent)
+ {
+ /*
+ * Pick up online events or failures to go offline.
+ * Ignore failure events for CPUs we didn't see go offline.
+ */
+# ifdef CPU_DOWN_FAILED
+ case CPU_DOWN_FAILED:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
+ case CPU_DOWN_FAILED_FROZEN:
+# endif
+ if (!RTCpuSetIsMember(&g_MpPendingOfflineSet, idCpu))
+ break; /* fProcessEvent stays false */
+ /* fall thru */
+# endif
+ case CPU_ONLINE:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
+ case CPU_ONLINE_FROZEN:
+# endif
+# ifdef CPU_DOWN_FAILED
+ RTCpuSetDel(&g_MpPendingOfflineSet, idCpu);
+# endif
+ fProcessEvent = true;
+ break;
+
+ /*
+ * Pick the earliest possible offline event.
+ * The only important thing here is that we get the event and that
+ * it's exactly one.
+ */
+# ifdef CPU_DOWN_PREPARE
+ case CPU_DOWN_PREPARE:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
+ case CPU_DOWN_PREPARE_FROZEN:
+# endif
+ fProcessEvent = true;
+# else
+ case CPU_DEAD:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DEAD_FROZEN)
+ case CPU_DEAD_FROZEN:
+# endif
+ /* Don't process CPU_DEAD notifications. */
+# endif
+# ifdef CPU_DOWN_FAILED
+ RTCpuSetAdd(&g_MpPendingOfflineSet, idCpu);
+# endif
+ break;
+ }
+
+ if (!fProcessEvent)
+ return NOTIFY_DONE;
+
+ /* Translate the native event into an IPRT notification. */
+ switch (ulNativeEvent)
+ {
+# ifdef CPU_DOWN_FAILED
+ case CPU_DOWN_FAILED:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
+ case CPU_DOWN_FAILED_FROZEN:
+# endif
+# endif
+ case CPU_ONLINE:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
+ case CPU_ONLINE_FROZEN:
+# endif
+ rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
+ break;
+
+# ifdef CPU_DOWN_PREPARE
+ case CPU_DOWN_PREPARE:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
+ case CPU_DOWN_PREPARE_FROZEN:
+# endif
+ rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
+ break;
+# endif
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+/** Registers g_NotifierBlock with the (pre-4.10) CPU notifier chain and
+ * clears the pending-offline bookkeeping set.
+ * @returns VINF_SUCCESS or an IPRT error converted from the errno value. */
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+ int rc;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+# ifdef CPU_DOWN_FAILED
+ RTCpuSetEmpty(&g_MpPendingOfflineSet);
+# endif
+
+ rc = register_cpu_notifier(&g_NotifierBlock);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ AssertMsgReturn(!rc, ("%d\n", rc), RTErrConvertFromErrno(rc));
+ return VINF_SUCCESS;
+}
+
+
+/** Unregisters g_NotifierBlock from the CPU notifier chain. */
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ unregister_cpu_notifier(&g_NotifierBlock);
+ IPRT_LINUX_RESTORE_EFL_AC();
+}
+
+#else /* Not supported / Not needed */
+
+/** No-op stub for kernels without CPU hotplug notification support
+ * (pre-2.5.71 or non-SMP builds): nothing to register. */
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+ return VINF_SUCCESS;
+}
+
+/** No-op stub matching the init stub above. */
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+}
+
+#endif /* Not supported / Not needed */
+
diff --git a/src/VBox/Runtime/r0drv/linux/process-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/process-r0drv-linux.c
new file mode 100644
index 00000000..0a7a334f
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/process-r0drv-linux.c
@@ -0,0 +1,49 @@
+/* $Id: process-r0drv-linux.c $ */
+/** @file
+ * IPRT - Process, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/process.h>
+
+
+/** Returns the current process ID; uses the thread-group ID so all threads
+ * of a process report the same value. */
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+ return (RTPROCESS)current->tgid;
+}
+RT_EXPORT_SYMBOL(RTProcSelf);
+
+
+/** Returns the ring-0 process handle of the current process; same tgid as
+ * RTProcSelf, just cast to the ring-0 handle type. */
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+ return (RTR0PROCESS)current->tgid;
+}
+RT_EXPORT_SYMBOL(RTR0ProcHandleSelf);
+
diff --git a/src/VBox/Runtime/r0drv/linux/rtStrFormatKernelAddress-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/rtStrFormatKernelAddress-r0drv-linux.c
new file mode 100644
index 00000000..c8b0567e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/rtStrFormatKernelAddress-r0drv-linux.c
@@ -0,0 +1,56 @@
+/* $Id: rtStrFormatKernelAddress-r0drv-linux.c $ */
+/** @file
+ * IPRT - IPRT String Formatter, ring-0 addresses.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_STRING
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+#include "internal/string.h"
+
+
+/** Formats a ring-0 address for the IPRT string formatter.
+ *
+ * On release builds with kernels >= 2.6.38 this delegates to the kernel's
+ * "%pK" formatter, which censors/hashes kernel pointers according to the
+ * kptr_restrict policy; RTSTR_F_SPECIAL selects the "0x" prefixed variant
+ * by keeping the first two characters of the format string. Debug builds
+ * and older kernels fall back on plain hex via RTStrFormatNumber (note
+ * that cchWidth/cchPrecision only take effect on that fallback path).
+ *
+ * @returns Number of characters written (excluding the terminator).
+ */
+DECLHIDDEN(size_t) rtStrFormatKernelAddress(char *pszBuf, size_t cbBuf, RTR0INTPTR uPtr, signed int cchWidth,
+ signed int cchPrecision, unsigned int fFlags)
+{
+#if !defined(DEBUG) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+ RT_NOREF(cchWidth, cchPrecision);
+ /* use the Linux kernel function which is able to handle "%pK" */
+ static const char s_szFmt[] = "0x%pK";
+ const char *pszFmt = s_szFmt;
+ if (!(fFlags & RTSTR_F_SPECIAL))
+ pszFmt += 2;
+ return scnprintf(pszBuf, cbBuf, pszFmt, uPtr);
+#else
+ Assert(cbBuf >= 64);
+ return RTStrFormatNumber(pszBuf, uPtr, 16, cchWidth, cchPrecision, fFlags);
+#endif
+}
diff --git a/src/VBox/Runtime/r0drv/linux/semevent-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/semevent-r0drv-linux.c
new file mode 100644
index 00000000..bbed6371
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/semevent-r0drv-linux.c
@@ -0,0 +1,279 @@
+/* $Id: semevent-r0drv-linux.c $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+
+#include "waitqueue-r0drv-linux.h"
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Linux event semaphore.
+ *
+ * The reference counter (cRefs) keeps this structure alive while threads are
+ * inside signal/wait, so destruction can run concurrently with waiters; the
+ * memory is only freed when the last reference is dropped.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The object status - !0 when signaled and 0 when reset. */
+ uint32_t volatile fState;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** The wait queue. */
+ wait_queue_head_t Head;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+
+/** Creates an event semaphore with default settings; see RTSemEventCreateEx. */
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+ return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates an event semaphore.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY, or VERR_INVALID_PARAMETER.
+ * @param   phEventSem  Where to store the semaphore handle on success.
+ * @param   fFlags      RTSEMEVENT_FLAGS_XXX; validated but otherwise not
+ *                      stored (no lock validator in this backend).
+ * @param   hClass      Lock validator class - unused here.
+ * @param   pszNameFmt  Lock validator name format - unused here.
+ */
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ PRTSEMEVENTINTERNAL pThis;
+ IPRT_LINUX_SAVE_EFL_AC();
+ RT_NOREF_PV(hClass); RT_NOREF_PV(pszNameFmt);
+
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+
+ pThis = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY; /* NOTE(review): returns without IPRT_LINUX_RESTORE_EFL_AC - confirm intentional. */
+
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->fState = 0; /* created unsignalled */
+ pThis->cRefs = 1;
+ init_waitqueue_head(&pThis->Head);
+
+ *phEventSem = pThis;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemEventCreate);
+
+
+/**
+ * Retains a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventLnxRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs < 100000); NOREF(cRefs); /* sanity: catch leaked references */
+}
+
+
+/**
+ * Releases a reference to the event semaphore, freeing the structure when
+ * the count reaches zero.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventLnxRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ RTMemFree(pThis);
+}
+
+
+/**
+ * Destroys an event semaphore.
+ *
+ * Invalidates the magic, then wakes all waiters so they can observe the
+ * changed magic and bail out with VERR_SEM_DESTROYED; the memory itself is
+ * freed by whoever drops the final reference (rtR0SemEventLnxRelease).
+ *
+ * @returns VINF_SUCCESS (also for NIL handles) or VERR_INVALID_HANDLE.
+ * @param   hEventSem   The semaphore handle; NIL is ignored quietly.
+ */
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = hEventSem;
+ if (pThis == NIL_RTSEMEVENT)
+ return VINF_SUCCESS;
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs > 0);
+
+ /*
+ * Invalidate it and signal the object just in case.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
+ ASMAtomicWriteU32(&pThis->fState, 0);
+ Assert(!waitqueue_active(&pThis->Head)); /* destroying with active waiters is a caller bug, but handled anyway */
+ wake_up_all(&pThis->Head);
+ rtR0SemEventLnxRelease(pThis);
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemEventDestroy);
+
+
+/**
+ * Signals the event semaphore.
+ *
+ * Sets the signalled state and wakes the wait queue; a waiter consumes the
+ * signal by atomically clearing fState again (single release semantics).
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hEventSem   The semaphore handle.
+ */
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ rtR0SemEventLnxRetain(pThis);
+
+ /*
+ * Signal the event object.
+ */
+ ASMAtomicWriteU32(&pThis->fState, 1);
+ wake_up(&pThis->Head);
+
+ rtR0SemEventLnxRelease(pThis);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemEventSignal);
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * Fast path: atomically consume the signal (fState 1 -> 0) without touching
+ * the wait queue. Slow path: loop on prepare/check/sleep via the common
+ * waitqueue helpers until signalled, timed out, interrupted or destroyed.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventLnxWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ int rc;
+ RT_NOREF_PV(pSrcPos);
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventLnxRetain(pThis); /* keep the struct alive across destroy */
+
+ /*
+ * Try grab the event without setting up the wait.
+ */
+ if ( 1 /** @todo check if there are someone waiting already - waitqueue_active, but then what do we do below? */
+ && ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
+ rc = VINF_SUCCESS; /* consumed the signal (1 -> 0) */
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ IPRT_LINUX_SAVE_EFL_AC();
+ RTR0SEMLNXWAIT Wait;
+ rc = rtR0SemLnxWaitInit(&Wait, fFlags, uTimeout, &pThis->Head);
+ if (RT_SUCCESS(rc))
+ {
+ IPRT_DEBUG_SEMS_STATE(pThis, 'E');
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ rtR0SemLnxWaitPrepare(&Wait);
+
+ /* Check the exit conditions (re-check magic after the prepare to
+ close the race with RTSemEventDestroy). */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
+ rc = VINF_SUCCESS;
+ else if (rtR0SemLnxWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemLnxWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemLnxWaitDoIt(&Wait);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemLnxWaitDelete(&Wait);
+ IPRT_DEBUG_SEMS_STATE_RC(pThis, 'E', rc);
+ }
+ IPRT_LINUX_RESTORE_EFL_AC();
+ }
+
+ rtR0SemEventLnxRelease(pThis);
+ return rc;
+}
+
+
+/** Waits on an event semaphore; see iprt/semaphore.h for fFlags/uTimeout semantics. */
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventLnxWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventLnxWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitEx);
+
+
+/** Debug variant of RTSemEventWaitEx carrying the caller's source position. */
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventLnxWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitExDebug);
+
+
+/** Returns the wait timeout resolution (delegates to the common Linux wait code). */
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+ return rtR0SemLnxWaitGetResolution();
+}
+RT_EXPORT_SYMBOL(RTSemEventGetResolution);
+
diff --git a/src/VBox/Runtime/r0drv/linux/semeventmulti-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/semeventmulti-r0drv-linux.c
new file mode 100644
index 00000000..516f2403
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/semeventmulti-r0drv-linux.c
@@ -0,0 +1,344 @@
+/* $Id: semeventmulti-r0drv-linux.c $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/lockvalidator.h>
+
+#include "waitqueue-r0drv-linux.h"
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name fStateAndGen values
+ * @{ */
+/** The state bit number. */
+#define RTSEMEVENTMULTILNX_STATE_BIT 0
+/** The state mask. */
+#define RTSEMEVENTMULTILNX_STATE_MASK RT_BIT_32(RTSEMEVENTMULTILNX_STATE_BIT)
+/** The generation mask. */
+#define RTSEMEVENTMULTILNX_GEN_MASK ~RTSEMEVENTMULTILNX_STATE_MASK
+/** The generation shift. */
+#define RTSEMEVENTMULTILNX_GEN_SHIFT 1
+/** The initial variable value. */
+#define RTSEMEVENTMULTILNX_STATE_GEN_INIT UINT32_C(0xfffffffc)
+/** @} */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Linux event semaphore (multiple release).
+ *
+ * The generation counter embedded in fStateAndGen lets a waiter detect that a
+ * signal happened even if the state bit was reset again before it re-checked.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+ /** Magic value (RTSEMEVENTMULTI_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The object state bit and generation counter.
+ * The generation counter is incremented every time the object is
+ * signalled. */
+ uint32_t volatile fStateAndGen;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** The wait queue. */
+ wait_queue_head_t Head;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+
+
+
+/** Creates a multi-release event semaphore with default settings; see RTSemEventMultiCreateEx. */
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+ return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates a multi-release event semaphore.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY, or VERR_INVALID_PARAMETER.
+ * @param   phEventMultiSem Where to store the handle on success.
+ * @param   fFlags          Only RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL is accepted;
+ *                          not stored (no lock validator in this backend).
+ * @param   hClass          Lock validator class - unused here.
+ * @param   pszNameFmt      Lock validator name format - unused here.
+ */
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+ const char *pszNameFmt, ...)
+{
+ PRTSEMEVENTMULTIINTERNAL pThis;
+ IPRT_LINUX_SAVE_EFL_AC();
+ RT_NOREF_PV(hClass); RT_NOREF_PV(pszNameFmt);
+
+ AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+ pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMEVENTMULTI_MAGIC;
+ pThis->fStateAndGen = RTSEMEVENTMULTILNX_STATE_GEN_INIT; /* unsignalled, generation pre-seeded */
+ pThis->cRefs = 1;
+ init_waitqueue_head(&pThis->Head);
+
+ *phEventMultiSem = pThis;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VERR_NO_MEMORY;
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiCreate);
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiLnxRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ NOREF(cRefs);
+ Assert(cRefs && cRefs < 100000); /* sanity: neither wrapped nor leaked */
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiLnxRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ {
+ Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC); /* only the destroyed object can hit zero refs */
+ RTMemFree(pThis);
+ }
+}
+
+
+/**
+ * Destroys a multi-release event semaphore.
+ *
+ * Invalidates the magic, clears the state bit (generation is preserved) and
+ * wakes all waiters so they can return VERR_SEM_DESTROYED; the memory is
+ * freed by the last reference holder.
+ *
+ * @returns VINF_SUCCESS (also for NIL) or VERR_INVALID_PARAMETER.
+ * @param   hEventMultiSem  The semaphore handle; NIL is ignored quietly.
+ */
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (pThis == NIL_RTSEMEVENTMULTI)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ Assert(pThis->cRefs > 0);
+
+ /*
+ * Invalidate it and signal the object just in case.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC);
+ ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTILNX_GEN_MASK); /* clear the state bit, keep the generation */
+ Assert(!waitqueue_active(&pThis->Head));
+ wake_up_all(&pThis->Head);
+ rtR0SemEventMultiLnxRelease(pThis);
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiDestroy);
+
+
+/**
+ * Signals a multi-release event semaphore.
+ *
+ * Atomically bumps the generation counter and sets the state bit in one CAS
+ * loop, then wakes all waiters (every waiter is released until reset).
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
+ * @param   hEventMultiSem  The semaphore handle.
+ */
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ uint32_t fNew;
+ uint32_t fOld;
+
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiLnxRetain(pThis);
+
+ /*
+ * Signal the event object. The cause of the paranoia here is racing to try
+ * deal with racing RTSemEventMultiSignal calls (should probably be
+ * forbidden, but it's relatively easy to handle).
+ */
+ do
+ {
+ fNew = fOld = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ fNew += 1 << RTSEMEVENTMULTILNX_GEN_SHIFT; /* advance the generation */
+ fNew |= RTSEMEVENTMULTILNX_STATE_MASK; /* set the signalled bit */
+ }
+ while (!ASMAtomicCmpXchgU32(&pThis->fStateAndGen, fNew, fOld));
+
+ wake_up_all(&pThis->Head);
+
+ rtR0SemEventMultiLnxRelease(pThis);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiSignal);
+
+
+/**
+ * Resets a multi-release event semaphore to the unsignalled state.
+ *
+ * Clears the state bit only; the generation counter is left unchanged so
+ * waiters that raced a prior signal still see it.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
+ * @param   hEventMultiSem  The semaphore handle.
+ */
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiLnxRetain(pThis);
+
+ /*
+ * Reset it.
+ */
+ ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTILNX_STATE_MASK);
+
+ rtR0SemEventMultiLnxRelease(pThis);
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiReset);
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * Success is detected by ANY change of fStateAndGen relative to the value
+ * sampled before sleeping: a signal always bumps the generation, so even a
+ * signal immediately followed by a reset releases waiting threads.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventMultiWaitEx.
+ * @param uTimeout See RTSemEventMultiWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventMultiLnxWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ uint32_t fOrgStateAndGen;
+ int rc;
+ RT_NOREF_PV(pSrcPos);
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiLnxRetain(pThis); /* keep the struct alive across destroy */
+
+ /*
+ * Is the event already signalled or do we have to wait?
+ */
+ fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ if (fOrgStateAndGen & RTSEMEVENTMULTILNX_STATE_MASK)
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMLNXWAIT Wait;
+ IPRT_LINUX_SAVE_EFL_AC();
+ rc = rtR0SemLnxWaitInit(&Wait, fFlags, uTimeout, &pThis->Head);
+ if (RT_SUCCESS(rc))
+ {
+ IPRT_DEBUG_SEMS_STATE(pThis, 'E');
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ rtR0SemLnxWaitPrepare(&Wait);
+
+ /* Check the exit conditions (magic re-checked after the prepare to
+ close the race with RTSemEventMultiDestroy). */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
+ rc = VINF_SUCCESS; /* signalled (state or generation changed) */
+ else if (rtR0SemLnxWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemLnxWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemLnxWaitDoIt(&Wait);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemLnxWaitDelete(&Wait);
+ IPRT_DEBUG_SEMS_STATE_RC(pThis, 'E', rc);
+ }
+ IPRT_LINUX_RESTORE_EFL_AC();
+ }
+
+ rtR0SemEventMultiLnxRelease(pThis);
+ return rc;
+}
+
+
+/** Waits on a multi-release event semaphore; see iprt/semaphore.h for the flag/timeout semantics.
+ * NOTE(review): the condition tests RTSEMEVENT_STRICT rather than a
+ * multi-specific define - presumably a shared strictness switch; verify. */
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventMultiLnxWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventMultiLnxWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitEx);
+
+
+/** Debug variant of RTSemEventMultiWaitEx carrying the caller's source position. */
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventMultiLnxWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitExDebug);
+
+
+/** Returns the wait timeout resolution (delegates to the common Linux wait code). */
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+ return rtR0SemLnxWaitGetResolution();
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiGetResolution);
+
diff --git a/src/VBox/Runtime/r0drv/linux/semfastmutex-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/semfastmutex-r0drv-linux.c
new file mode 100644
index 00000000..3785bcf0
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/semfastmutex-r0drv-linux.c
@@ -0,0 +1,157 @@
+/* $Id: semfastmutex-r0drv-linux.c $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/errcore.h>
+#if defined(RT_STRICT) || defined(IPRT_DEBUG_SEMS)
+# include <iprt/thread.h>
+#endif
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the linux semaphore structure.
+ *
+ * Implemented on top of a kernel counting semaphore initialised to 1; the
+ * Owner field exists only in strict/debug builds for ownership assertions.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+ /** Magic value (RTSEMFASTMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** the linux semaphore. */
+ struct semaphore Semaphore;
+#if defined(RT_STRICT) || defined(IPRT_DEBUG_SEMS)
+ /** For check. */
+ RTNATIVETHREAD volatile Owner;
+#endif
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+/**
+ * Creates a fast mutex.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param   phFastMtx   Where to store the handle on success.
+ */
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Allocate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis;
+ pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY; /* NOTE(review): returns without IPRT_LINUX_RESTORE_EFL_AC - confirm intentional. */
+
+ /*
+ * Initialize.
+ */
+ pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+ sema_init(&pThis->Semaphore, 1); /* binary semaphore: count of 1 = unowned */
+#if defined(RT_STRICT) || defined(IPRT_DEBUG_SEMS)
+ pThis->Owner = NIL_RTNATIVETHREAD;
+#endif
+
+ *phFastMtx = pThis;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemFastMutexCreate);
+
+
+/**
+ * Destroys a fast mutex.
+ *
+ * The memory is freed immediately; unlike the event semaphores there is no
+ * reference counting here, so the caller must ensure no thread is still
+ * using the mutex.
+ *
+ * @returns VINF_SUCCESS (also for NIL) or VERR_INVALID_HANDLE.
+ * @param   hFastMtx    The mutex handle; NIL is ignored quietly.
+ */
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+ /*
+ * Validate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ if (pThis == NIL_RTSEMFASTMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+ ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemFastMutexDestroy);
+
+
+/**
+ * Acquires the fast mutex, blocking (uninterruptibly, via down()) until it is
+ * available. In strict builds the owner is recorded and recursion is caught
+ * by the AssertRelease on the old owner value.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hFastMtx    The mutex handle.
+ */
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+ IPRT_DEBUG_SEMS_STATE(pThis, 'd');
+ down(&pThis->Semaphore); /* may sleep - caller must be in a sleepable context */
+#if defined(RT_STRICT) || defined(IPRT_DEBUG_SEMS)
+ IPRT_DEBUG_SEMS_STATE(pThis, 'o');
+ AssertRelease(pThis->Owner == NIL_RTNATIVETHREAD);
+ ASMAtomicUoWriteSize(&pThis->Owner, RTThreadNativeSelf());
+#endif
+
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemFastMutexRequest);
+
+
+/**
+ * Releases the fast mutex. In strict builds the calling thread must be the
+ * recorded owner (AssertRelease).
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hFastMtx    The mutex handle.
+ */
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+#if defined(RT_STRICT) || defined(IPRT_DEBUG_SEMS)
+ AssertRelease(pThis->Owner == RTThreadNativeSelf());
+ ASMAtomicUoWriteSize(&pThis->Owner, NIL_RTNATIVETHREAD);
+#endif
+ up(&pThis->Semaphore);
+ IPRT_DEBUG_SEMS_STATE(pThis, 'u');
+
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemFastMutexRelease);
+
diff --git a/src/VBox/Runtime/r0drv/linux/semmutex-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/semmutex-r0drv-linux.c
new file mode 100644
index 00000000..5a0a0ef5
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/semmutex-r0drv-linux.c
@@ -0,0 +1,421 @@
+/* $Id: semmutex-r0drv-linux.c $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/list.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Per-waiter entry linked onto RTSEMMUTEXINTERNAL::WaiterList while the
+ * thread sleeps; lives on the waiter's stack (see rtSemMutexLinuxRequestSleep).
+ */
+typedef struct RTSEMMUTEXLNXWAITER
+{
+ /** The list entry. */
+ RTLISTNODE ListEntry;
+ /** The waiting task. */
+ struct task_struct *pTask;
+ /** Why did we wake up? */
+ enum
+ {
+ /** Wakeup to take the semaphore. */
+ RTSEMMUTEXLNXWAITER_WAKEUP,
+ /** Mutex is being destroyed. */
+ RTSEMMUTEXLNXWAITER_DESTROYED,
+ /** Some other reason. */
+ RTSEMMUTEXLNXWAITER_OTHER
+ } volatile enmReason;
+} RTSEMMUTEXLNXWAITER, *PRTSEMMUTEXLNXWAITER;
+
+/**
+ * Wrapper for the linux semaphore structure.
+ *
+ * A recursive mutex built from a spinlock-protected owner/waiter-list; the
+ * spinlock also serialises falling asleep against wakeups and destruction.
+ */
+typedef struct RTSEMMUTEXINTERNAL
+{
+ /** Magic value (RTSEMMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** The number of recursions. */
+ uint32_t cRecursions;
+ /** The list of waiting threads. */
+ RTLISTANCHOR WaiterList;
+ /** The current owner, NULL if none. */
+ struct task_struct *pOwnerTask;
+ /** The number of references to this piece of memory. This is used to
+ * prevent it from being kicked from underneath us while waiting. */
+ uint32_t volatile cRefs;
+ /** The spinlock protecting the members and falling asleep. */
+ spinlock_t Spinlock;
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+/**
+ * Creates a recursive mutex semaphore.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param   phMtx   Where to store the handle on success.
+ */
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMtx)
+{
+ int rc = VINF_SUCCESS;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Allocate.
+ */
+ PRTSEMMUTEXINTERNAL pThis;
+ pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ /*
+ * Initialize.
+ */
+ pThis->u32Magic = RTSEMMUTEX_MAGIC;
+ pThis->cRecursions = 0;
+ pThis->pOwnerTask = NULL; /* unowned */
+ pThis->cRefs = 1;
+ RTListInit(&pThis->WaiterList);
+ spin_lock_init(&pThis->Spinlock);
+
+ *phMtx = pThis;
+ }
+ else
+ rc = VERR_NO_MEMORY;
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return rc;
+}
+RT_EXPORT_SYMBOL(RTSemMutexCreate);
+
+
+/**
+ * Destroys a mutex semaphore.
+ *
+ * Atomically kills the magic (this also rejects double destruction), wakes
+ * every waiter with the DESTROYED reason so their sleep loops return
+ * VERR_SEM_DESTROYED, and frees the memory unless sleeping waiters still
+ * hold references - in that case the last waiter frees it.
+ *
+ * @returns VINF_SUCCESS (also for NIL) or VERR_INVALID_HANDLE.
+ * @param   hMtx    The mutex handle; NIL is ignored quietly.
+ */
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMtx)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMtx;
+ PRTSEMMUTEXLNXWAITER pCur;
+ unsigned long fSavedIrq;
+
+ /*
+ * Validate.
+ */
+ if (pThis == NIL_RTSEMMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+ /*
+ * Kill it, kick waiters and release it.
+ */
+ AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
+
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ spin_lock_irqsave(&pThis->Spinlock, fSavedIrq);
+ RTListForEach(&pThis->WaiterList, pCur, RTSEMMUTEXLNXWAITER, ListEntry)
+ {
+ pCur->enmReason = RTSEMMUTEXLNXWAITER_DESTROYED;
+ wake_up_process(pCur->pTask);
+ }
+
+ if (ASMAtomicDecU32(&pThis->cRefs) != 0)
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq); /* waiters still referencing; last one frees */
+ else
+ {
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq);
+ RTMemFree(pThis);
+ }
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSemMutexDestroy);
+
+
+/**
+ * Worker for rtSemMutexLinuxRequest that handles the case where we go to sleep.
+ *
+ * Entered with the spinlock held (acquired with irqsave by the caller); the
+ * lock is temporarily dropped around schedule_timeout() and released with the
+ * caller's saved IRQ flags before returning.
+ *
+ * @returns VINF_SUCCESS, VERR_INTERRUPTED, VERR_TIMEOUT or VERR_SEM_DESTROYED.
+ * Returns without owning the spinlock.
+ * @param pThis The mutex instance.
+ * @param cMillies The timeout.
+ * @param fInterruptible The wait type.
+ * @param fSavedIrq The saved IRQ flags.
+ */
+static int rtSemMutexLinuxRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
+ bool fInterruptible, unsigned long fSavedIrq)
+{
+ struct task_struct *pSelf = current;
+ int rc = VERR_TIMEOUT;
+ long lTimeout = cMillies == RT_INDEFINITE_WAIT ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(cMillies);
+ RTSEMMUTEXLNXWAITER Waiter; /* stack-allocated; linked into the waiter list below */
+
+ IPRT_DEBUG_SEMS_STATE(pThis, 'm');
+
+ /*
+ * Grab a reference to the mutex and add ourselves to the waiter list.
+ */
+ ASMAtomicIncU32(&pThis->cRefs);
+
+ Waiter.pTask = pSelf;
+ Waiter.enmReason = RTSEMMUTEXLNXWAITER_OTHER;
+ RTListAppend(&pThis->WaiterList, &Waiter.ListEntry);
+
+ /*
+ * Do the waiting.
+ */
+ for (;;)
+ {
+ /* Check signal and timeout conditions. */
+ if ( fInterruptible
+ && signal_pending(pSelf))
+ {
+ rc = VERR_INTERRUPTED;
+ break;
+ }
+
+ if (!lTimeout)
+ break; /* rc is still VERR_TIMEOUT */
+
+ /* Go to sleep. */
+ set_current_state(fInterruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&pThis->Spinlock);
+
+ lTimeout = schedule_timeout(lTimeout);
+
+ spin_lock_irq(&pThis->Spinlock);
+ set_current_state(TASK_RUNNING);
+
+ /* Did someone wake us up? */
+ if (Waiter.enmReason == RTSEMMUTEXLNXWAITER_WAKEUP)
+ {
+ /* Ownership was handed to us by the releaser; take it. */
+ Assert(pThis->cRecursions == 0);
+ pThis->cRecursions = 1;
+ pThis->pOwnerTask = pSelf;
+ rc = VINF_SUCCESS;
+ break;
+ }
+
+ /* Is the mutex being destroyed? */
+ if (RT_UNLIKELY( Waiter.enmReason == RTSEMMUTEXLNXWAITER_DESTROYED
+ || pThis->u32Magic != RTSEMMUTEX_MAGIC))
+ {
+ rc = VERR_SEM_DESTROYED;
+ break;
+ }
+ }
+
+ /*
+ * Unlink ourself from the waiter list, dereference the mutex and exit the
+ * lock. We might have to free the mutex if it was destroyed while we slept.
+ */
+ RTListNodeRemove(&Waiter.ListEntry);
+ IPRT_DEBUG_SEMS_STATE_RC(pThis, 'M', rc);
+
+ if (RT_LIKELY(ASMAtomicDecU32(&pThis->cRefs) != 0))
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq);
+ else
+ {
+ /* We hold the last reference: destroy ran while we slept, so we free. */
+ Assert(RT_FAILURE_NP(rc));
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq);
+ RTMemFree(pThis);
+ }
+ return rc;
+}
+
+
+/**
+ * Internal worker for the RTSemMutexRequest* API family.
+ *
+ * Handles, in order: recursive acquisition by the current owner, immediate
+ * acquisition of an unowned mutex with no queued waiters, polling calls
+ * (cMillies == 0 -> VERR_TIMEOUT), and finally sleeping via
+ * rtSemMutexLinuxRequestSleep.
+ *
+ * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED, VERR_SEM_DESTROYED
+ *          or VERR_INVALID_HANDLE.
+ * @param   hMutexSem       The mutex handle.
+ * @param   cMillies        Timeout in milliseconds; 0 polls,
+ *                          RT_INDEFINITE_WAIT waits forever.
+ * @param   fInterruptible  Whether the sleep may be interrupted by signals.
+ */
+DECLINLINE(int) rtSemMutexLinuxRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ struct task_struct *pSelf = current;
+ unsigned long fSavedIrq;
+ int rc;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs >= 1);
+
+ /*
+ * Lock it and check if it's a recursion.
+ */
+ spin_lock_irqsave(&pThis->Spinlock, fSavedIrq);
+ if (pThis->pOwnerTask == pSelf)
+ {
+ pThis->cRecursions++;
+ Assert(pThis->cRecursions > 1);
+ Assert(pThis->cRecursions < 256); /* sanity: catch runaway recursion */
+ rc = VINF_SUCCESS;
+ }
+ /*
+ * Not a recursion, maybe it's not owned by anyone then?
+ * (The waiter-list check keeps us from jumping the queue.)
+ */
+ else if ( pThis->pOwnerTask == NULL
+ && RTListIsEmpty(&pThis->WaiterList))
+ {
+ Assert(pThis->cRecursions == 0);
+ pThis->cRecursions = 1;
+ pThis->pOwnerTask = pSelf;
+ rc = VINF_SUCCESS;
+ }
+ /*
+ * Was it a polling call?
+ */
+ else if (cMillies == 0)
+ rc = VERR_TIMEOUT;
+ /*
+ * No, so go to sleep.
+ */
+ else
+ {
+ /* The sleep worker releases the spinlock itself. */
+ rc = rtSemMutexLinuxRequestSleep(pThis, cMillies, fInterruptible, fSavedIrq);
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+ return rc;
+ }
+
+ IPRT_DEBUG_SEMS_STATE_RC(pThis, 'M', rc);
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq);
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+ return rc;
+}
+
+
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexLinuxRequest(hMutexSem, cMillies, false /*fInterruptible*/);
+}
+RT_EXPORT_SYMBOL(RTSemMutexRequest);
+
+
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RT_NOREF_PV(uId); RT_SRC_POS_NOREF();
+ return RTSemMutexRequest(hMutexSem, cMillies);
+}
+RT_EXPORT_SYMBOL(RTSemMutexRequestDebug);
+
+
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexLinuxRequest(hMutexSem, cMillies, true /*fInterruptible*/);
+}
+RT_EXPORT_SYMBOL(RTSemMutexRequestNoResume);
+
+
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RT_NOREF_PV(uId); RT_SRC_POS_NOREF();
+ return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+RT_EXPORT_SYMBOL(RTSemMutexRequestNoResumeDebug);
+
+
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMtx)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMtx;
+ struct task_struct *pSelf = current;
+ unsigned long fSavedIrq;
+ int rc;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs >= 1);
+
+ /*
+ * Take the lock and release one recursion.
+ */
+ spin_lock_irqsave(&pThis->Spinlock, fSavedIrq);
+ if (pThis->pOwnerTask == pSelf)
+ {
+ Assert(pThis->cRecursions > 0);
+ if (--pThis->cRecursions == 0)
+ {
+ pThis->pOwnerTask = NULL;
+
+ /* anyone to wake up? */
+ if (!RTListIsEmpty(&pThis->WaiterList))
+ {
+ PRTSEMMUTEXLNXWAITER pWaiter = RTListGetFirst(&pThis->WaiterList, RTSEMMUTEXLNXWAITER, ListEntry);
+ pWaiter->enmReason = RTSEMMUTEXLNXWAITER_WAKEUP;
+ wake_up_process(pWaiter->pTask);
+ }
+ IPRT_DEBUG_SEMS_STATE(pThis, 'u');
+ }
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NOT_OWNER;
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq);
+
+ AssertRC(rc);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return rc;
+}
+RT_EXPORT_SYMBOL(RTSemMutexRelease);
+
+
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ unsigned long fSavedIrq;
+ bool fOwned;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, false);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), false);
+ Assert(pThis->cRefs >= 1);
+
+ /*
+ * Take the lock and check whether anyone currently owns the mutex.
+ */
+ spin_lock_irqsave(&pThis->Spinlock, fSavedIrq);
+ fOwned = pThis->pOwnerTask != NULL;
+ spin_unlock_irqrestore(&pThis->Spinlock, fSavedIrq);
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return fOwned;
+
+}
+RT_EXPORT_SYMBOL(RTSemMutexIsOwned);
+
diff --git a/src/VBox/Runtime/r0drv/linux/spinlock-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/spinlock-r0drv-linux.c
new file mode 100644
index 00000000..ac269880
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/spinlock-r0drv-linux.c
@@ -0,0 +1,186 @@
+/* $Id: spinlock-r0drv-linux.c $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/spinlock.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the spinlock_t structure.
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+ /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The spinlock creation flags. */
+ uint32_t fFlags;
+ /** The saved interrupt flag. */
+ unsigned long volatile fIntSaved;
+ /** The linux spinlock structure. */
+ spinlock_t Spinlock;
+#ifdef RT_MORE_STRICT
+ /** The idAssertCpu variable before acquiring the lock for asserting after
+ * releasing the spinlock. */
+ RTCPUID volatile idAssertCpu;
+ /** The CPU that owns the lock. */
+ RTCPUID volatile idCpuOwner;
+#endif
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ PRTSPINLOCKINTERNAL pThis;
+ AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+ RT_NOREF_PV(pszName);
+
+ /*
+ * Allocate.
+ */
+ Assert(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+ pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+ /*
+ * Initialize and return.
+ */
+ pThis->u32Magic = RTSPINLOCK_MAGIC;
+ pThis->fFlags = fFlags;
+ pThis->fIntSaved = 0;
+#ifdef RT_MORE_STRICT
+ pThis->idCpuOwner = NIL_RTCPUID;
+ pThis->idAssertCpu = NIL_RTCPUID;
+#endif
+
+ spin_lock_init(&pThis->Spinlock);
+
+ *pSpinlock = pThis;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSpinlockCreate);
+
+
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+ /*
+ * Validate input.
+ */
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ if (pThis->u32Magic != RTSPINLOCK_MAGIC)
+ {
+ AssertMsgFailed(("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ ASMAtomicIncU32(&pThis->u32Magic);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTSpinlockDestroy);
+
+
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ IPRT_LINUX_SAVE_EFL_AC();
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+ AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC,
+ ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));
+
+#ifdef CONFIG_PROVE_LOCKING
+ lockdep_off();
+#endif
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+ unsigned long fIntSaved;
+ spin_lock_irqsave(&pThis->Spinlock, fIntSaved);
+ pThis->fIntSaved = fIntSaved;
+ }
+ else
+ spin_lock(&pThis->Spinlock);
+#ifdef CONFIG_PROVE_LOCKING
+ lockdep_on();
+#endif
+
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+ RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
+}
+RT_EXPORT_SYMBOL(RTSpinlockAcquire);
+
+
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ IPRT_LINUX_SAVE_EFL_AC(); /* spin_unlock* may preempt and trash eflags.ac. */
+ RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();
+ AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC,
+ ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));
+ RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);
+
+#ifdef CONFIG_PROVE_LOCKING
+ lockdep_off();
+#endif
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+ unsigned long fIntSaved = pThis->fIntSaved;
+ pThis->fIntSaved = 0;
+ spin_unlock_irqrestore(&pThis->Spinlock, fIntSaved);
+ }
+ else
+ spin_unlock(&pThis->Spinlock);
+#ifdef CONFIG_PROVE_LOCKING
+ lockdep_on();
+#endif
+
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC();
+ RT_ASSERT_PREEMPT_CPUID();
+}
+RT_EXPORT_SYMBOL(RTSpinlockRelease);
+
diff --git a/src/VBox/Runtime/r0drv/linux/string.h b/src/VBox/Runtime/r0drv/linux/string.h
new file mode 100644
index 00000000..37c09fe6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/string.h
@@ -0,0 +1,60 @@
+/* $Id: string.h $ */
+/** @file
+ * IPRT - wrapper for the linux kernel asm/string.h.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_linux_string_h
+#define IPRT_INCLUDED_SRC_r0drv_linux_string_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/cdefs.h>
+
+RT_C_DECLS_BEGIN
+#ifndef bool /* Linux 2.6.19 C++ nightmare */
+#define bool bool_type
+#define true true_type
+#define false false_type
+#define _Bool int
+#define bool_type_r0drv_string_h__
+#endif
+#include <linux/types.h>
+#include <linux/string.h>
+#ifdef bool_type_r0drv_string_h__
+#undef bool
+#undef true
+#undef false
+#undef bool_type_r0drv_string_h__
+#endif
+char *strpbrk(const char *pszStr, const char *pszChars)
+#if defined(__THROW)
+ __THROW
+#endif
+ ;
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_linux_string_h */
+
diff --git a/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h b/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
new file mode 100644
index 00000000..e31f2fee
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
@@ -0,0 +1,461 @@
+/* $Id: the-linux-kernel.h $ */
+/** @file
+ * IPRT - Include all necessary headers for the Linux kernel.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_linux_the_linux_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_linux_the_linux_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/*
+ * Include iprt/types.h to install the bool wrappers.
+ * Then use the linux bool type for all the stuff included here.
+ */
+#include <iprt/types.h>
+#define bool linux_bool
+
+#if RT_GNUC_PREREQ(4, 6)
+# pragma GCC diagnostic push
+#endif
+#if RT_GNUC_PREREQ(4, 2)
+# pragma GCC diagnostic ignored "-Wunused-parameter"
+# if !defined(__cplusplus) && RT_GNUC_PREREQ(4, 3)
+# pragma GCC diagnostic ignored "-Wold-style-declaration" /* 2.6.18-411.0.0.0.1.el5/build/include/asm/apic.h:110: warning: 'inline' is not at beginning of declaration [-Wold-style-declaration] */
+# endif
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
+# include <generated/autoconf.h>
+#else
+# ifndef AUTOCONF_INCLUDED
+# include <linux/autoconf.h>
+# endif
+#endif
+
+/* We only support 2.4 and 2.6 series kernels */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
+# error We only support 2.4 and 2.6 series kernels
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# error We only support 2.4 and 2.6 series kernels
+#endif
+
+#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
+# define MODVERSIONS
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71)
+# include <linux/modversions.h>
+# endif
+#endif
+#ifndef KBUILD_STR
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
+# define KBUILD_STR(s) s
+# else
+# define KBUILD_STR(s) #s
+# endif
+#endif
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
+# include <linux/kconfig.h> /* for macro IS_ENABLED */
+# endif
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+# include <linux/semaphore.h>
+#else /* older kernels */
+# include <asm/semaphore.h>
+#endif /* older kernels */
+#include <linux/module.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+# include <linux/moduleparam.h>
+#endif
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+# include <linux/namei.h>
+#endif
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 23) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 31)
+#include <linux/splice.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+# include <linux/sched/rt.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+# include <linux/sched/signal.h>
+# include <linux/sched/types.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
+# include <linux/jiffies.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
+# include <linux/ktime.h>
+# include <linux/hrtimer.h>
+#endif
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 71)
+# include <linux/cpu.h>
+# include <linux/notifier.h>
+#endif
+/* For the basic additions module */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/compiler.h>
+#ifndef HAVE_UNLOCKED_IOCTL /* linux/fs.h defines this */
+# include <linux/smp_lock.h>
+#endif
+/* For the shared folders module */
+#include <linux/vmalloc.h>
+#define wchar_t linux_wchar_t
+#include <linux/nls.h>
+#undef wchar_t
+#include <asm/mman.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/div64.h>
+
+/* For thread-context hooks. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) && defined(CONFIG_PREEMPT_NOTIFIERS)
+# include <linux/preempt.h>
+#endif
+
+/* for workqueue / task queues. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+# include <linux/workqueue.h>
+#else
+# include <linux/tqueue.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+# include <linux/kthread.h>
+#endif
+
+/* for cr4_init_shadow() / cpu_tlbstate. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
+# include <asm/tlbflush.h>
+#endif
+
+/* for set_pages_x() */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+# include <asm/set_memory.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+# include <asm/smap.h>
+#else
+static inline void clac(void) { }
+static inline void stac(void) { }
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# ifndef page_to_pfn
+# define page_to_pfn(page) ((page) - mem_map)
+# endif
+#endif
+
+#ifndef DEFINE_WAIT
+# define DEFINE_WAIT(name) DECLARE_WAITQUEUE(name, current)
+#endif
+
+#ifndef __GFP_NOWARN
+# define __GFP_NOWARN 0
+#endif
+
+/*
+ * 2.4 / early 2.6 compatibility wrappers
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7)
+
+# ifndef MAX_JIFFY_OFFSET
+# define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
+# endif
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 29) || LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+
+DECLINLINE(unsigned int) jiffies_to_msecs(unsigned long cJiffies)
+{
+# if HZ <= 1000 && !(1000 % HZ)
+ return (1000 / HZ) * cJiffies;
+# elif HZ > 1000 && !(HZ % 1000)
+ return (cJiffies + (HZ / 1000) - 1) / (HZ / 1000);
+# else
+ return (cJiffies * 1000) / HZ;
+# endif
+}
+
+DECLINLINE(unsigned long) msecs_to_jiffies(unsigned int cMillies)
+{
+# if HZ > 1000
+ if (cMillies > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+# endif
+# if HZ <= 1000 && !(1000 % HZ)
+ return (cMillies + (1000 / HZ) - 1) / (1000 / HZ);
+# elif HZ > 1000 && !(HZ % 1000)
+ return cMillies * (HZ / 1000);
+# else
+ return (cMillies * HZ + 999) / 1000;
+# endif
+}
+
+# endif /* < 2.4.29 || >= 2.6.0 */
+
+#endif /* < 2.6.7 */
+
+/*
+ * 2.4 compatibility wrappers
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+
+# define prepare_to_wait(q, wait, state) \
+ do { \
+ add_wait_queue(q, wait); \
+ set_current_state(state); \
+ } while (0)
+
+# define after_wait(wait) \
+ do { \
+ list_del_init(&(wait)->task_list); \
+ } while (0)
+
+# define finish_wait(q, wait) \
+ do { \
+ set_current_state(TASK_RUNNING); \
+ remove_wait_queue(q, wait); \
+ } while (0)
+
+#else /* >= 2.6.0 */
+
+# define after_wait(wait) do {} while (0)
+
+#endif /* >= 2.6.0 */
+
+/** @def TICK_NSEC
+ * The time between ticks in nsec */
+#ifndef TICK_NSEC
+# define TICK_NSEC (1000000000UL / HZ)
+#endif
+
+/*
+ * This sucks soooo badly on x86! Why don't they export __PAGE_KERNEL_EXEC so PAGE_KERNEL_EXEC would be usable?
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) && defined(RT_ARCH_AMD64)
+# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) && defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
+# ifdef __PAGE_KERNEL_EXEC
+ /* >= 2.6.27 */
+# define MY_PAGE_KERNEL_EXEC __pgprot(boot_cpu_has(X86_FEATURE_PGE) ? __PAGE_KERNEL_EXEC | _PAGE_GLOBAL : __PAGE_KERNEL_EXEC)
+# else
+# define MY_PAGE_KERNEL_EXEC __pgprot(boot_cpu_has(X86_FEATURE_PGE) ? _PAGE_KERNEL_EXEC | _PAGE_GLOBAL : _PAGE_KERNEL_EXEC)
+# endif
+#else
+# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+
+/*
+ * The redhat hack section.
+ * - The current hacks are for 2.4.21-15.EL only.
+ */
+#ifndef NO_REDHAT_HACKS
+/* accounting. */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# ifdef VM_ACCOUNT
+# define USE_RHEL4_MUNMAP
+# endif
+# endif
+
+/* backported remap_page_range. */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# include <asm/tlb.h>
+# ifdef tlb_vma /* probably not good enough... */
+# define HAVE_26_STYLE_REMAP_PAGE_RANGE 1
+# endif
+# endif
+
+# ifndef RT_ARCH_AMD64
+/* In 2.6.9-22.ELsmp we have to call change_page_attr() twice when changing
+ * the page attributes from PAGE_KERNEL to something else, because there appears
+ * to be a bug in one of the many patches that redhat applied.
+ * It should be safe to do this on less buggy linux kernels too. ;-)
+ */
+# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
+ do { \
+ if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) \
+ change_page_attr(pPages, cPages, prot); \
+ change_page_attr(pPages, cPages, prot); \
+ } while (0)
+# endif /* !RT_ARCH_AMD64 */
+#endif /* !NO_REDHAT_HACKS */
+
+#ifndef MY_CHANGE_PAGE_ATTR
+# ifdef RT_ARCH_AMD64 /** @todo This is a cheap hack, but it'll get around that 'else BUG();' in __change_page_attr(). */
+# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
+ do { \
+ change_page_attr(pPages, cPages, PAGE_KERNEL_NOCACHE); \
+ change_page_attr(pPages, cPages, prot); \
+ } while (0)
+# else
+# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) change_page_attr(pPages, cPages, prot)
+# endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+# define MY_SET_PAGES_EXEC(pPages, cPages) set_pages_x(pPages, cPages)
+# define MY_SET_PAGES_NOEXEC(pPages, cPages) set_pages_nx(pPages, cPages)
+#else
+# define MY_SET_PAGES_EXEC(pPages, cPages) \
+ do { \
+ if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL)) \
+ MY_CHANGE_PAGE_ATTR(pPages, cPages, MY_PAGE_KERNEL_EXEC); \
+ } while (0)
+# define MY_SET_PAGES_NOEXEC(pPages, cPages) \
+ do { \
+ if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL)) \
+ MY_CHANGE_PAGE_ATTR(pPages, cPages, PAGE_KERNEL); \
+ } while (0)
+#endif
+
+/** @def ONE_MSEC_IN_JIFFIES
+ * The number of jiffies that make up 1 millisecond. Must be at least 1! */
+#if HZ <= 1000
+# define ONE_MSEC_IN_JIFFIES 1
+#elif !(HZ % 1000)
+# define ONE_MSEC_IN_JIFFIES (HZ / 1000)
+#else
+# define ONE_MSEC_IN_JIFFIES ((HZ + 999) / 1000)
+# error "HZ is not a multiple of 1000, the GIP stuff won't work right!"
+#endif
+
+/*
+ * Stop using the linux bool type.
+ */
+#undef bool
+
+#if RT_GNUC_PREREQ(4, 6)
+# pragma GCC diagnostic pop
+#endif
+
+/*
+ * There are post-2.6.24 kernels (confusingly with unchanged version number)
+ * which eliminate macros which were marked as deprecated.
+ */
+#ifndef __attribute_used__
+#define __attribute_used__ __used
+#endif
+
+/**
+ * Hack for shortening pointers on linux so we can stuff more stuff into the
+ * task_struct::comm field. This is used by the semaphore code but put here
+ * because we don't have any better place atm. Don't use outside IPRT, please.
+ */
+#ifdef RT_ARCH_AMD64
+# define IPRT_DEBUG_SEMS_ADDRESS(addr) ( ((long)(addr) & (long)~UINT64_C(0xfffffff000000000)) )
+#else
+# define IPRT_DEBUG_SEMS_ADDRESS(addr) ( (long)(addr) )
+#endif
+
+/**
+ * Puts semaphore info into the task_struct::comm field if IPRT_DEBUG_SEMS is
+ * defined.
+ */
+#ifdef IPRT_DEBUG_SEMS
+# define IPRT_DEBUG_SEMS_STATE(pThis, chState) \
+ snprintf(current->comm, sizeof(current->comm), "%c%lx", (chState), IPRT_DEBUG_SEMS_ADDRESS(pThis));
+#else
+# define IPRT_DEBUG_SEMS_STATE(pThis, chState) do { } while (0)
+#endif
+
+/**
+ * Puts semaphore info into the task_struct::comm field if IPRT_DEBUG_SEMS is
+ * defined.
+ */
+#ifdef IPRT_DEBUG_SEMS
+# define IPRT_DEBUG_SEMS_STATE_RC(pThis, chState, rc) \
+ snprintf(current->comm, sizeof(current->comm), "%c%lx:%d", (chState), IPRT_DEBUG_SEMS_ADDRESS(pThis), rc);
+#else
+# define IPRT_DEBUG_SEMS_STATE_RC(pThis, chState, rc) do { } while (0)
+#endif
+
+/** @name Macros for preserving EFLAGS.AC on 3.19+/amd64 paranoid.
+ * The AMD 64 switch_to in macro in arch/x86/include/asm/switch_to.h stopped
+ * restoring flags.
+ * @{ */
+#if defined(CONFIG_X86_SMAP) || defined(RT_STRICT) || defined(IPRT_WITH_EFLAGS_AC_PRESERVING)
+# include <iprt/asm-amd64-x86.h>
+# define IPRT_X86_EFL_AC RT_BIT(18)
+# define IPRT_LINUX_SAVE_EFL_AC() RTCCUINTREG fSavedEfl = ASMGetFlags()
+# define IPRT_LINUX_RESTORE_EFL_AC() ASMSetFlags(fSavedEfl)
+# define IPRT_LINUX_RESTORE_EFL_ONLY_AC() ASMChangeFlags(~IPRT_X86_EFL_AC, fSavedEfl & IPRT_X86_EFL_AC)
+#else
+# define IPRT_LINUX_SAVE_EFL_AC() do { } while (0)
+# define IPRT_LINUX_RESTORE_EFL_AC() do { } while (0)
+# define IPRT_LINUX_RESTORE_EFL_ONLY_AC() do { } while (0)
+#endif
+/** @} */
+
+/*
+ * There are some conflicting defines in iprt/param.h, sort them out here.
+ */
+#ifndef IPRT_INCLUDED_param_h
+# undef PAGE_SIZE
+# undef PAGE_OFFSET_MASK
+# include <iprt/param.h>
+#endif
+
+/*
+ * Some global indicator macros.
+ */
+/** @def IPRT_LINUX_HAS_HRTIMER
+ * Whether the kernel supports high resolution timers (Linux kernel versions
+ * 2.6.28 and later (hrtimer_add_expires_ns() & schedule_hrtimeout). */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+# define IPRT_LINUX_HAS_HRTIMER
+#endif
+
+/*
+ * Workqueue stuff, see initterm-r0drv-linux.c.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+typedef struct work_struct RTR0LNXWORKQUEUEITEM;
+#else
+typedef struct tq_struct RTR0LNXWORKQUEUEITEM;
+#endif
+DECLHIDDEN(void) rtR0LnxWorkqueuePush(RTR0LNXWORKQUEUEITEM *pWork, void (*pfnWorker)(RTR0LNXWORKQUEUEITEM *));
+DECLHIDDEN(void) rtR0LnxWorkqueueFlush(void);
+
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_linux_the_linux_kernel_h */
diff --git a/src/VBox/Runtime/r0drv/linux/thread-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/thread-r0drv-linux.c
new file mode 100644
index 00000000..c8e0c9d3
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/thread-r0drv-linux.c
@@ -0,0 +1,234 @@
+/* $Id: thread-r0drv-linux.c $ */
+/** @file
+ * IPRT - Threads, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#include <iprt/asm.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28) || defined(CONFIG_X86_SMAP)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+#ifndef CONFIG_PREEMPT
+/** Per-cpu preemption counters. */
+static int32_t volatile g_acPreemptDisabled[NR_CPUS];
+#endif
+
+
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+ return (RTNATIVETHREAD)current;
+}
+RT_EXPORT_SYMBOL(RTThreadNativeSelf);
+
+
+static int rtR0ThreadLnxSleepCommon(RTMSINTERVAL cMillies)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+ long cJiffies = msecs_to_jiffies(cMillies);
+ set_current_state(TASK_INTERRUPTIBLE);
+ cJiffies = schedule_timeout(cJiffies);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ if (!cJiffies)
+ return VINF_SUCCESS;
+ return VERR_INTERRUPTED;
+}
+
+
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadLnxSleepCommon(cMillies);
+}
+RT_EXPORT_SYMBOL(RTThreadSleep);
+
+
+RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadLnxSleepCommon(cMillies);
+}
+RT_EXPORT_SYMBOL(RTThreadSleepNoLog);
+
+
+RTDECL(bool) RTThreadYield(void)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
+ yield();
+#else
+ /** @todo r=ramshankar: Can we use cond_resched() instead? */
+ set_current_state(TASK_RUNNING);
+ sys_sched_yield();
+ schedule();
+#endif
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return true;
+}
+RT_EXPORT_SYMBOL(RTThreadYield);
+
+
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+#ifdef CONFIG_PREEMPT
+ Assert(hThread == NIL_RTTHREAD); RT_NOREF_PV(hThread);
+# ifdef preemptible
+ return preemptible();
+# else
+ return preempt_count() == 0 && !in_atomic() && !irqs_disabled();
+# endif
+#else
+ int32_t c;
+
+ Assert(hThread == NIL_RTTHREAD);
+ c = g_acPreemptDisabled[smp_processor_id()];
+ AssertMsg(c >= 0 && c < 32, ("%d\n", c));
+ if (c != 0)
+ return false;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
+ if (in_atomic())
+ return false;
+# endif
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28)
+ if (irqs_disabled())
+ return false;
+# else
+ if (!ASMIntAreEnabled())
+ return false;
+# endif
+ return true;
+#endif
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptIsEnabled);
+
+
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD); RT_NOREF_PV(hThread);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 4)
+ return !!test_tsk_thread_flag(current, TIF_NEED_RESCHED);
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
+ return !!need_resched();
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 110)
+ return current->need_resched != 0;
+
+#else
+ return need_resched != 0;
+#endif
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptIsPending);
+
+
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+ /* yes, RTThreadPreemptIsPending is reliable. */
+ return true;
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptIsPendingTrusty);
+
+
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+#ifdef CONFIG_PREEMPT
+ return true; /* Yes, kernel preemption is possible. */
+#else
+ return false; /* No kernel preemption (or CONFIG_PREEMPT_VOLUNTARY). */
+#endif
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptIsPossible);
+
+
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+#ifdef CONFIG_PREEMPT
+ AssertPtr(pState);
+ Assert(pState->u32Reserved == 0);
+ pState->u32Reserved = 42;
+ /* This ASSUMES that CONFIG_PREEMPT_COUNT is always defined with CONFIG_PREEMPT. */
+ preempt_disable();
+ RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+
+#else /* !CONFIG_PREEMPT */
+ int32_t c;
+ AssertPtr(pState);
+ Assert(pState->u32Reserved == 0);
+
+ /* Do our own accounting. */
+ c = ASMAtomicIncS32(&g_acPreemptDisabled[smp_processor_id()]);
+ AssertMsg(c > 0 && c < 32, ("%d\n", c));
+ pState->u32Reserved = c;
+ RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+#endif
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptDisable);
+
+
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+#ifdef CONFIG_PREEMPT
+ IPRT_LINUX_SAVE_EFL_AC(); /* paranoia */
+ AssertPtr(pState);
+ Assert(pState->u32Reserved == 42);
+ RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+ preempt_enable();
+ IPRT_LINUX_RESTORE_EFL_ONLY_AC(); /* paranoia */
+
+#else
+ int32_t volatile *pc;
+ AssertPtr(pState);
+ AssertMsg(pState->u32Reserved > 0 && pState->u32Reserved < 32, ("%d\n", pState->u32Reserved));
+ RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+
+ /* Do our own accounting. */
+ pc = &g_acPreemptDisabled[smp_processor_id()];
+ AssertMsg(pState->u32Reserved == (uint32_t)*pc, ("u32Reserved=%d *pc=%d \n", pState->u32Reserved, *pc));
+ ASMAtomicUoWriteS32(pc, pState->u32Reserved - 1);
+#endif
+ pState->u32Reserved = 0;
+}
+RT_EXPORT_SYMBOL(RTThreadPreemptRestore);
+
+
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+
+ return in_interrupt() != 0;
+}
+RT_EXPORT_SYMBOL(RTThreadIsInInterrupt);
+
diff --git a/src/VBox/Runtime/r0drv/linux/thread2-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/thread2-r0drv-linux.c
new file mode 100644
index 00000000..8647583a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/thread2-r0drv-linux.c
@@ -0,0 +1,162 @@
+/* $Id: thread2-r0drv-linux.c $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include "internal/thread.h"
+
+
+/**
+ * Gets the IPRT thread handle of the calling thread, resolved from the
+ * Linux 'current' task pointer via the internal native-thread lookup.
+ */
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+ return rtThreadGetByNative((RTNATIVETHREAD)current);
+}
+
+
+/** Native thread subsystem init for Linux ring-0 — nothing to set up. */
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Maps an IPRT thread type to a Linux scheduling class/priority and applies
+ * it to the calling thread ('current', not pThread's native thread).
+ *
+ * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER for an unknown enmType.
+ * @param   pThread     The IPRT thread structure (unused here).
+ * @param   enmType     The IPRT thread type to map.
+ */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+ /* See comment near MAX_RT_PRIO in linux/sched.h for details on
+ sched_priority. */
+ int iSchedClass = SCHED_NORMAL;
+ struct sched_param Param = { .sched_priority = MAX_PRIO - 1 };
+ switch (enmType)
+ {
+ case RTTHREADTYPE_INFREQUENT_POLLER:
+ Param.sched_priority = MAX_RT_PRIO + 5;
+ break;
+
+ case RTTHREADTYPE_EMULATION:
+ Param.sched_priority = MAX_RT_PRIO + 4;
+ break;
+
+ case RTTHREADTYPE_DEFAULT:
+ Param.sched_priority = MAX_RT_PRIO + 3;
+ break;
+
+ case RTTHREADTYPE_MSG_PUMP:
+ Param.sched_priority = MAX_RT_PRIO + 2;
+ break;
+
+ /* Only the I/O and timer types get a real-time scheduling class. */
+ case RTTHREADTYPE_IO:
+ iSchedClass = SCHED_FIFO;
+ Param.sched_priority = MAX_RT_PRIO - 1;
+ break;
+
+ case RTTHREADTYPE_TIMER:
+ iSchedClass = SCHED_FIFO;
+ Param.sched_priority = 1; /* not 0 just in case */
+ break;
+
+ default:
+ AssertMsgFailed(("enmType=%d\n", enmType));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /* Best effort: the sched_setscheduler result is ignored, a failure simply
+    leaves the thread at its previous priority. */
+ sched_setscheduler(current, iSchedClass, &Param);
+#else
+ RT_NOREF_PV(enmType);
+#endif
+ RT_NOREF_PV(pThread);
+
+ return VINF_SUCCESS;
+}
+
+
+/** Adopting alien (non-IPRT) threads is not supported in the Linux ring-0
+ *  build; always fails with VERR_NOT_IMPLEMENTED. */
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+ RT_NOREF_PV(pThread);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+/** Crude substitute for properly waiting on native thread termination:
+ *  just sleeps 1 ms and hopes the thread is gone. */
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+ /** @todo fix RTThreadWait/RTR0Term race on linux. */
+ RTThreadSleep(1); NOREF(pThread);
+}
+
+
+/** No native per-thread resources are allocated on Linux, so nothing to free. */
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+ NOREF(pThread);
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 4)
+/**
+ * Native kernel thread wrapper function.
+ *
+ * This will forward to rtThreadMain and do termination upon return.
+ *
+ * @returns 0 (the kthread exit code; the IPRT status is handled inside
+ *          rtThreadMain).
+ * @param pvArg Pointer to the argument package.
+ */
+static int rtThreadNativeMain(void *pvArg)
+{
+ PRTTHREADINT pThread = (PRTTHREADINT)pvArg;
+
+ rtThreadMain(pThread, (RTNATIVETHREAD)current, &pThread->szName[0]);
+ return 0;
+}
+#endif
+
+
+/**
+ * Creates and starts a native kernel thread named "iprt-<name>" running
+ * rtThreadNativeMain with pThreadInt as its argument.
+ *
+ * @returns VINF_SUCCESS and *pNativeThread set on success,
+ *          VERR_GENERAL_FAILURE on kthread_run error (the IS_ERR detail is
+ *          discarded), VERR_NOT_IMPLEMENTED on pre-2.6.4 kernels.
+ * @param   pThreadInt      The IPRT thread structure (provides the name).
+ * @param   pNativeThread   Where to return the native thread handle.
+ */
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 4)
+ struct task_struct *NativeThread;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ RT_ASSERT_PREEMPTIBLE();
+
+ /* kthread_run both creates and wakes the thread. */
+ NativeThread = kthread_run(rtThreadNativeMain, pThreadInt, "iprt-%s", pThreadInt->szName);
+
+ if (!IS_ERR(NativeThread))
+ {
+ *pNativeThread = (RTNATIVETHREAD)NativeThread;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VERR_GENERAL_FAILURE;
+#else
+ return VERR_NOT_IMPLEMENTED;
+#endif
+}
+
diff --git a/src/VBox/Runtime/r0drv/linux/threadctxhooks-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/threadctxhooks-r0drv-linux.c
new file mode 100644
index 00000000..fbd2eb90
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/threadctxhooks-r0drv-linux.c
@@ -0,0 +1,330 @@
+/* $Id: threadctxhooks-r0drv-linux.c $ */
+/** @file
+ * IPRT - Thread Context Switching Hook, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2013-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/mem.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include "internal/thread.h"
+
+
+/*
+ * Linux kernel 2.6.23 introduced preemption notifiers but RedHat 2.6.18 kernels
+ * got it backported.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) && defined(CONFIG_PREEMPT_NOTIFIERS)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal hook object for linux.
+ *
+ * Note: u32Magic is set to ~RTTHREADCTXHOOKINT_MAGIC in
+ * RTThreadCtxHookDestroy before the object is freed.
+ */
+typedef struct RTTHREADCTXHOOKINT
+{
+ /** Magic value (RTTHREADCTXHOOKINT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The thread handle (owner) for which the hook is registered. */
+ RTNATIVETHREAD hOwner;
+ /** The preemption notifier object. */
+ struct preempt_notifier LnxPreemptNotifier;
+ /** Whether the hook is enabled or not. If enabled, the LnxPreemptNotifier
+ * is linked into the owning thread's list of preemption callouts. */
+ bool fEnabled;
+ /** Pointer to the user callback. */
+ PFNRTTHREADCTXHOOK pfnCallback;
+ /** User argument passed to the callback. */
+ void *pvUser;
+ /** The linux callbacks. */
+ struct preempt_ops PreemptOps;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
+ /** Starting with 3.1.19, the linux kernel doesn't restore kernel RFLAGS during
+ * task switch, so we have to do that ourselves. (x86 code is not affected.) */
+ RTCCUINTREG fSavedRFlags;
+#endif
+} RTTHREADCTXHOOKINT;
+typedef RTTHREADCTXHOOKINT *PRTTHREADCTXHOOKINT;
+
+
+/**
+ * Hook function for the thread schedule out event.
+ *
+ * @param pPreemptNotifier Pointer to the preempt_notifier struct.
+ * @param pNext Pointer to the task that is being scheduled
+ * instead of the current thread.
+ *
+ * @remarks Called with the rq (runqueue) lock held and with preemption and
+ * interrupts disabled!
+ */
+static void rtThreadCtxHooksLnxSchedOut(struct preempt_notifier *pPreemptNotifier, struct task_struct *pNext)
+{
+ PRTTHREADCTXHOOKINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXHOOKINT, LnxPreemptNotifier);
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ /* Save EFLAGS and set AC (stac) so the callback can touch user-accessible
+    memory on SMAP-enabled hosts; restored below. */
+ RTCCUINTREG fSavedEFlags = ASMGetFlags();
+ stac();
+#endif
+ RT_NOREF_PV(pNext);
+
+ AssertPtr(pThis);
+ AssertPtr(pThis->pfnCallback);
+ Assert(pThis->fEnabled);
+ Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+ pThis->pfnCallback(RTTHREADCTXEVENT_OUT, pThis->pvUser);
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ ASMSetFlags(fSavedEFlags);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
+ /* Remember the flags so SchedIn can re-establish AC after the task switch
+    (the kernel stopped restoring RFLAGS itself; see struct comment). */
+ pThis->fSavedRFlags = fSavedEFlags;
+# endif
+#endif
+}
+
+
+/**
+ * Hook function for the thread schedule in event.
+ *
+ * @param pPreemptNotifier Pointer to the preempt_notifier struct.
+ * @param iCpu The CPU this thread is being scheduled on.
+ *
+ * @remarks Called without holding the rq (runqueue) lock and with preemption
+ * enabled!
+ * @todo r=bird: Preemption is of course disabled when it is called.
+ */
+static void rtThreadCtxHooksLnxSchedIn(struct preempt_notifier *pPreemptNotifier, int iCpu)
+{
+ PRTTHREADCTXHOOKINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXHOOKINT, LnxPreemptNotifier);
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ /* Same SMAP/AC dance as in rtThreadCtxHooksLnxSchedOut. */
+ RTCCUINTREG fSavedEFlags = ASMGetFlags();
+ stac();
+#endif
+ RT_NOREF_PV(iCpu);
+
+ AssertPtr(pThis);
+ AssertPtr(pThis->pfnCallback);
+ Assert(pThis->fEnabled);
+
+ pThis->pfnCallback(RTTHREADCTXEVENT_IN, pThis->pvUser);
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
+ /* Carry over the AC bit saved at schedule-out time, since the kernel no
+    longer restores RFLAGS across the task switch. */
+ fSavedEFlags &= ~RT_BIT_64(18) /*X86_EFL_AC*/;
+ fSavedEFlags |= pThis->fSavedRFlags & RT_BIT_64(18) /*X86_EFL_AC*/;
+# endif
+ ASMSetFlags(fSavedEFlags);
+#endif
+}
+
+
+/**
+ * Worker function for RTThreadCtxHooks(Deregister|Release)().
+ *
+ * Unregisters the preemption notifier and clears fEnabled, all inside a
+ * preempt-disabled section so no context-switch callout can race us.
+ *
+ * @param pThis Pointer to the internal thread-context object.
+ */
+DECLINLINE(void) rtThreadCtxHookDisable(PRTTHREADCTXHOOKINT pThis)
+{
+ Assert(pThis->PreemptOps.sched_out == rtThreadCtxHooksLnxSchedOut);
+ Assert(pThis->PreemptOps.sched_in == rtThreadCtxHooksLnxSchedIn);
+ preempt_disable();
+ preempt_notifier_unregister(&pThis->LnxPreemptNotifier);
+ pThis->fEnabled = false;
+ preempt_enable();
+}
+
+
+/**
+ * Creates a thread-context hook object for the calling thread.
+ *
+ * The hook is created disabled; RTThreadCtxHookEnable registers it.
+ *
+ * @returns VINF_SUCCESS, VERR_NO_MEMORY, VERR_INVALID_POINTER or
+ *          VERR_INVALID_FLAGS.
+ * @param   phCtxHook       Where to return the new hook handle.
+ * @param   fFlags          Must be 0.
+ * @param   pfnCallback     User callback invoked on schedule in/out.
+ * @param   pvUser          User argument for the callback.
+ */
+RTDECL(int) RTThreadCtxHookCreate(PRTTHREADCTXHOOK phCtxHook, uint32_t fFlags, PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis;
+ Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertReturn(fFlags == 0, VERR_INVALID_FLAGS);
+
+ /*
+ * Allocate and initialize a new hook. We don't register it yet, just
+ * create it.
+ */
+ pThis = (PRTTHREADCTXHOOKINT)RTMemAllocZ(sizeof(*pThis));
+ if (RT_UNLIKELY(!pThis))
+ {
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VERR_NO_MEMORY;
+ }
+ pThis->u32Magic = RTTHREADCTXHOOKINT_MAGIC;
+ pThis->hOwner = RTThreadNativeSelf();
+ pThis->fEnabled = false;
+ pThis->pfnCallback = pfnCallback;
+ pThis->pvUser = pvUser;
+ preempt_notifier_init(&pThis->LnxPreemptNotifier, &pThis->PreemptOps);
+ pThis->PreemptOps.sched_out = rtThreadCtxHooksLnxSchedOut;
+ pThis->PreemptOps.sched_in = rtThreadCtxHooksLnxSchedIn;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+ /* 4.2+ requires a global use count before notifiers fire; paired with
+    preempt_notifier_dec() in RTThreadCtxHookDestroy. */
+ preempt_notifier_inc();
+#endif
+
+ *phCtxHook = pThis;
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTThreadCtxHookCreate);
+
+
+/**
+ * Destroys a thread-context hook, deregistering it first if still enabled.
+ *
+ * @returns VINF_SUCCESS (also for NIL handle) or VERR_INVALID_HANDLE.
+ * @param   hCtxHook    The hook to destroy; NIL_RTTHREADCTXHOOK is allowed.
+ */
+RTDECL(int ) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ if (pThis == NIL_RTTHREADCTXHOOK)
+ return VINF_SUCCESS;
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ VERR_INVALID_HANDLE);
+ Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ Assert(!pThis->fEnabled || pThis->hOwner == RTThreadNativeSelf());
+
+ /*
+ * If there's still a registered thread-context hook, deregister it now before destroying the object.
+ */
+ if (pThis->fEnabled)
+ {
+ Assert(pThis->hOwner == RTThreadNativeSelf());
+ rtThreadCtxHookDisable(pThis);
+ Assert(!pThis->fEnabled); /* paranoia */
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+ /* Balances the preempt_notifier_inc() done in RTThreadCtxHookCreate. */
+ preempt_notifier_dec();
+#endif
+
+ /* Invalidate the magic before freeing to catch use-after-free. */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
+ RTMemFree(pThis);
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTThreadCtxHookDestroy);
+
+
+/**
+ * Enables (registers) the thread-context hook for the owning thread.
+ *
+ * Must be called on the thread that created the hook.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hCtxHook    The hook to enable.
+ */
+RTDECL(int) RTThreadCtxHookEnable(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ VERR_INVALID_HANDLE);
+ Assert(pThis->hOwner == RTThreadNativeSelf());
+ Assert(!pThis->fEnabled);
+ if (!pThis->fEnabled)
+ {
+ IPRT_LINUX_SAVE_EFL_AC();
+ Assert(pThis->PreemptOps.sched_out == rtThreadCtxHooksLnxSchedOut);
+ Assert(pThis->PreemptOps.sched_in == rtThreadCtxHooksLnxSchedIn);
+
+ /*
+ * Register the callback.
+ */
+ preempt_disable();
+ /* Set fEnabled before registering so the callouts see it set. */
+ pThis->fEnabled = true;
+ preempt_notifier_register(&pThis->LnxPreemptNotifier);
+ preempt_enable();
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ }
+
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTThreadCtxHookEnable);
+
+
+/**
+ * Disables (deregisters) the thread-context hook if currently enabled.
+ *
+ * Must be called on the owning thread. NIL handle is a no-op.
+ *
+ * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
+ * @param   hCtxHook    The hook to disable.
+ */
+RTDECL(int) RTThreadCtxHookDisable(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ if (pThis != NIL_RTTHREADCTXHOOK)
+ {
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ VERR_INVALID_HANDLE);
+ Assert(pThis->hOwner == RTThreadNativeSelf());
+
+ /*
+ * Deregister the callback.
+ */
+ if (pThis->fEnabled)
+ {
+ IPRT_LINUX_SAVE_EFL_AC();
+ rtThreadCtxHookDisable(pThis);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ }
+ }
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTThreadCtxHookDisable);
+
+
+/**
+ * Checks whether a thread-context hook is currently enabled.
+ *
+ * @returns true if enabled, false for NIL handle, bad magic or disabled hook.
+ * @param   hCtxHook    The hook to query; NIL_RTTHREADCTXHOOK is allowed.
+ */
+RTDECL(bool) RTThreadCtxHookIsEnabled(RTTHREADCTXHOOK hCtxHook)
+{
+ /*
+ * Validate input.
+ */
+ PRTTHREADCTXHOOKINT pThis = hCtxHook;
+ if (pThis == NIL_RTTHREADCTXHOOK)
+ return false;
+ AssertPtr(pThis);
+ AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+ false);
+
+ return pThis->fEnabled;
+}
+
+#else /* Not supported / Not needed */
+# include "../generic/threadctxhooks-r0drv-generic.cpp"
+#endif /* Not supported / Not needed */
+
diff --git a/src/VBox/Runtime/r0drv/linux/time-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/time-r0drv-linux.c
new file mode 100644
index 00000000..399f6998
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/time-r0drv-linux.c
@@ -0,0 +1,196 @@
+/* $Id: time-r0drv-linux.c $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_TIME
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/time.h>
+#include <iprt/asm.h>
+
+
+
+/**
+ * Worker for all the RTTime*NanoTS/MilliTS wrappers below: returns a
+ * monotonic nanosecond timestamp using the best source the kernel offers
+ * (ktime on 2.6.16+, 64-bit jiffies on 2.5.60+, raw jiffies otherwise).
+ */
+DECLINLINE(uint64_t) rtTimeGetSystemNanoTS(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16) /* This must match timer-r0drv-linux.c! */
+ /*
+ * Use ktime_get_ts, this is also what clock_gettime(CLOCK_MONOTONIC,) is using.
+ */
+ uint64_t u64;
+ struct timespec Ts;
+ ktime_get_ts(&Ts);
+ u64 = Ts.tv_sec * RT_NS_1SEC_64 + Ts.tv_nsec;
+ return u64;
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 60)
+ /*
+ * Seems there is no way of getting to the exact source of
+ * sys_clock_gettime(CLOCK_MONOTONIC, &ts) here, I think. But
+ * 64-bit jiffies adjusted for the initial value should be pretty
+ * much the same I hope.
+ */
+ uint64_t u64 = get_jiffies_64();
+# ifdef INITIAL_JIFFIES
+ u64 += INITIAL_JIFFIES;
+# endif
+ u64 *= TICK_NSEC;
+ return u64;
+
+#else /* < 2.5.60 */
+# if BITS_PER_LONG >= 64
+ /*
+ * This is the same as above, except that there is no get_jiffies_64()
+ * here and we rely on long, and therefor jiffies, being 64-bit instead.
+ */
+ uint64_t u64 = jiffies;
+# ifdef INITIAL_JIFFIES
+ u64 += INITIAL_JIFFIES;
+# endif
+ u64 *= TICK_NSEC;
+ return u64;
+
+# else /* 32 bit jiffies */
+ /*
+ * We'll have to try track jiffy rollovers here or we'll be
+ * in trouble every time it flips.
+ *
+ * The high dword of the s_u64Last is the rollover count, the
+ * low dword is the previous jiffies. Updating is done by
+ * atomic compare & exchange of course.
+ */
+ static uint64_t volatile s_u64Last = 0;
+ uint64_t u64;
+
+ for (;;)
+ {
+ uint64_t u64NewLast;
+ int32_t iDelta;
+ uint32_t cRollovers;
+ uint32_t u32LastJiffies;
+
+ /* sample the values */
+ unsigned long ulNow = jiffies;
+ uint64_t u64Last = s_u64Last;
+ /* jiffies ticked while sampling; retry to get a consistent pair. */
+ if (ulNow != jiffies)
+ continue; /* try again */
+# ifdef INITIAL_JIFFIES
+ ulNow += INITIAL_JIFFIES;
+# endif
+
+ u32LastJiffies = (uint32_t)u64Last;
+ cRollovers = u64Last >> 32;
+
+ /*
+ * Check for rollover and update the static last value.
+ *
+ * We have to make sure we update it successfully to rule out
+ * an underrun because of racing someone.
+ */
+ iDelta = ulNow - u32LastJiffies;
+ if (iDelta < 0)
+ {
+ /* 32-bit jiffies wrapped since the last sample. */
+ cRollovers++;
+ u64NewLast = RT_MAKE_U64(ulNow, cRollovers);
+ if (!ASMAtomicCmpXchgU64(&s_u64Last, u64NewLast, u64Last))
+ continue; /* race, try again */
+ }
+ else
+ {
+ u64NewLast = RT_MAKE_U64(ulNow, cRollovers);
+ ASMAtomicCmpXchgU64(&s_u64Last, u64NewLast, u64Last);
+ }
+
+ /* calculate the return value */
+ u64 = ulNow;
+ u64 *= TICK_NSEC;
+ u64 += cRollovers * (_4G * TICK_NSEC);
+ break;
+ }
+
+ return u64;
+# endif /* 32 bit jiffies */
+#endif /* < 2.5.60 */
+}
+
+
+/** Monotonic timestamp in nanoseconds. */
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+ return rtTimeGetSystemNanoTS();
+}
+RT_EXPORT_SYMBOL(RTTimeNanoTS);
+
+
+/** Monotonic timestamp in milliseconds. */
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+ return rtTimeGetSystemNanoTS() / RT_NS_1MS;
+}
+RT_EXPORT_SYMBOL(RTTimeMilliTS);
+
+
+/** Same as RTTimeNanoTS on this platform: both map to rtTimeGetSystemNanoTS. */
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+ return rtTimeGetSystemNanoTS();
+}
+RT_EXPORT_SYMBOL(RTTimeSystemNanoTS);
+
+
+/** Same as RTTimeMilliTS on this platform. */
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+ return rtTimeGetSystemNanoTS() / RT_NS_1MS;
+}
+RT_EXPORT_SYMBOL(RTTimeSystemMilliTS);
+
+
+/**
+ * Gets the current real-time clock value.
+ *
+ * @returns pTime.
+ * @param   pTime   Where to store the time.
+ */
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+ IPRT_LINUX_SAVE_EFL_AC();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
+/* On Linux 4.20, time.h includes time64.h and we have to use 64-bit times. */
+# ifdef _LINUX_TIME64_H
+ struct timespec64 Ts;
+ ktime_get_real_ts64(&Ts);
+# else
+ struct timespec Ts;
+ ktime_get_real_ts(&Ts);
+# endif
+ IPRT_LINUX_RESTORE_EFL_AC();
+# ifdef _LINUX_TIME64_H
+ return RTTimeSpecSetTimespec64(pTime, &Ts);
+# else
+ return RTTimeSpecSetTimespec(pTime, &Ts);
+# endif
+#else /* < 2.6.16 */
+ struct timeval Tv;
+ do_gettimeofday(&Tv);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return RTTimeSpecSetTimeval(pTime, &Tv);
+#endif
+}
+RT_EXPORT_SYMBOL(RTTimeNow);
diff --git a/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c
new file mode 100644
index 00000000..fa37980e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c
@@ -0,0 +1,1693 @@
+/* $Id: timer-r0drv-linux.c $ */
+/** @file
+ * IPRT - Timers, Ring-0 Driver, Linux.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-linux-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/timer.h>
+#include <iprt/time.h>
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/spinlock.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+
+#include "internal/magics.h"
+
+/** @def RTTIMER_LINUX_WITH_HRTIMER
+ * Whether to use high resolution timers. */
+#if !defined(RTTIMER_LINUX_WITH_HRTIMER) \
+ && defined(IPRT_LINUX_HAS_HRTIMER)
+# define RTTIMER_LINUX_WITH_HRTIMER
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+# define mod_timer_pinned mod_timer
+# define HRTIMER_MODE_ABS_PINNED HRTIMER_MODE_ABS
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Timer state machine.
+ *
+ * This is used to try handle the issues with MP events and
+ * timers that runs on all CPUs. It's relatively nasty :-/
+ *
+ * All transitions are made atomically via rtTimerLnxSetState /
+ * rtTimerLnxCmpXchgState (the enum is accessed as a uint32_t).
+ */
+typedef enum RTTIMERLNXSTATE
+{
+ /** Stopped. */
+ RTTIMERLNXSTATE_STOPPED = 0,
+ /** Transient state; next ACTIVE. */
+ RTTIMERLNXSTATE_STARTING,
+ /** Transient state; next ACTIVE. (not really necessary) */
+ RTTIMERLNXSTATE_MP_STARTING,
+ /** Active. */
+ RTTIMERLNXSTATE_ACTIVE,
+ /** Active and in callback; next ACTIVE, STOPPED or CALLBACK_DESTROYING. */
+ RTTIMERLNXSTATE_CALLBACK,
+ /** Stopped while in the callback; next STOPPED. */
+ RTTIMERLNXSTATE_CB_STOPPING,
+ /** Restarted while in the callback; next ACTIVE, STOPPED, DESTROYING. */
+ RTTIMERLNXSTATE_CB_RESTARTING,
+ /** The callback shall destroy the timer; next STOPPED. */
+ RTTIMERLNXSTATE_CB_DESTROYING,
+ /** Transient state; next STOPPED. */
+ RTTIMERLNXSTATE_STOPPING,
+ /** Transient state; next STOPPED. */
+ RTTIMERLNXSTATE_MP_STOPPING,
+ /** The usual 32-bit hack. */
+ RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
+} RTTIMERLNXSTATE;
+
+
+/**
+ * A Linux sub-timer.
+ *
+ * One of these exists per CPU the timer runs on (see RTTIMER::aSubTimers).
+ */
+typedef struct RTTIMERLNXSUBTIMER
+{
+ /** Timer specific data; which member is valid depends on
+ * RTTIMER::fHighRes. */
+ union
+ {
+#if defined(RTTIMER_LINUX_WITH_HRTIMER)
+ /** High resolution timer. */
+ struct
+ {
+ /** The linux timer structure. */
+ struct hrtimer LnxTimer;
+ } Hr;
+#endif
+ /** Standard timer. */
+ struct
+ {
+ /** The linux timer structure. */
+ struct timer_list LnxTimer;
+ /** The start of the current run (ns).
+ * This is used to calculate when the timer ought to fire the next time. */
+ uint64_t u64NextTS;
+ /** The u64NextTS in jiffies. */
+ unsigned long ulNextJiffies;
+ /** Set when starting or changing the timer so that u64StartTs
+ * and u64NextTS gets reinitialized (eliminating some jitter). */
+ bool volatile fFirstAfterChg;
+ } Std;
+ } u;
+ /** The current tick number. */
+ uint64_t iTick;
+ /** Restart the single shot timer at this specific time.
+ * Used when a single shot timer is restarted from the callback. */
+ uint64_t volatile uNsRestartAt;
+ /** Pointer to the parent timer. */
+ PRTTIMER pParent;
+ /** The current sub-timer state. */
+ RTTIMERLNXSTATE volatile enmState;
+} RTTIMERLNXSUBTIMER;
+/** Pointer to a linux sub-timer. */
+typedef RTTIMERLNXSUBTIMER *PRTTIMERLNXSUBTIMER;
+
+
+/**
+ * The internal representation of an Linux timer handle.
+ *
+ * Note: allocated with a variable-length tail — aSubTimers[1] is really
+ * cCpus entries when RTTIMER_FLAGS_CPU_ALL is used.
+ */
+typedef struct RTTIMER
+{
+ /** Magic.
+ * This is RTTIMER_MAGIC, but changes to something else before the timer
+ * is destroyed to indicate clearly that thread should exit. */
+ uint32_t volatile u32Magic;
+ /** Spinlock synchronizing the fSuspended and MP event handling.
+ * This is NIL_RTSPINLOCK if cCpus == 1. */
+ RTSPINLOCK hSpinlock;
+ /** Flag indicating that the timer is suspended. */
+ bool volatile fSuspended;
+ /** Whether the timer must run on one specific CPU or not. */
+ bool fSpecificCpu;
+#ifdef CONFIG_SMP
+ /** Whether the timer must run on all CPUs or not. */
+ bool fAllCpus;
+#endif /* else: All -> specific on non-SMP kernels */
+ /** Whether it is a high resolution timer or a standard one. */
+ bool fHighRes;
+ /** The id of the CPU it must run on if fSpecificCpu is set. */
+ RTCPUID idCpu;
+ /** The number of CPUs this timer should run on. */
+ RTCPUID cCpus;
+ /** Callback. */
+ PFNRTTIMER pfnTimer;
+ /** User argument. */
+ void *pvUser;
+ /** The timer interval. 0 if one-shot. */
+ uint64_t volatile u64NanoInterval;
+ /** This is set to the number of jiffies between ticks if the interval is
+ * an exact number of jiffies. (Standard timers only.) */
+ unsigned long volatile cJiffies;
+ /** The change interval spinlock for standard timers only. */
+ spinlock_t ChgIntLock;
+ /** Workqueue item for delayed destruction. */
+ RTR0LNXWORKQUEUEITEM DtorWorkqueueItem;
+ /** Sub-timers.
+ * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
+ * an entry for all possible cpus. In that case the index will be the same as
+ * for the RTCpuSet. */
+ RTTIMERLNXSUBTIMER aSubTimers[1];
+} RTTIMER;
+
+
+/**
+ * A rtTimerLinuxStartOnCpu and rtTimerLinuxStartOnCpu argument package.
+ * (Bundled because per-CPU worker invocations take a single pvUser pointer.)
+ */
+typedef struct RTTIMERLINUXSTARTONCPUARGS
+{
+ /** The current time (RTTimeSystemNanoTS). */
+ uint64_t u64Now;
+ /** When to start firing (delta). */
+ uint64_t u64First;
+} RTTIMERLINUXSTARTONCPUARGS;
+/** Pointer to a rtTimerLinuxStartOnCpu argument package. */
+typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#ifdef CONFIG_SMP
+static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
+#endif
+
+#if 0
+#define DEBUG_HACKING
+#include <iprt/string.h>
+#include <iprt/asm-amd64-x86.h>
+/**
+ * Debug-only logger: prefixes the current CPU id and writes the formatted
+ * string to the VMM backdoor port 0x504.
+ */
+static void myLogBackdoorPrintf(const char *pszFormat, ...)
+{
+ char szTmp[256];
+ va_list args;
+ size_t cb;
+
+ cb = RTStrPrintf(szTmp, sizeof(szTmp) - 10, "%d: ", RTMpCpuId());
+ va_start(args, pszFormat);
+ cb += RTStrPrintfV(&szTmp[cb], sizeof(szTmp) - cb, pszFormat, args);
+ va_end(args);
+
+ ASMOutStrU8(0x504, (uint8_t *)&szTmp[0], cb);
+}
+/* Note: arguments must be pszFile, uLine (in that order) to match the
+   "%s(%d)" conversions; passing uLine to %s would be undefined behavior. */
+# define RTAssertMsg1Weak(pszExpr, uLine, pszFile, pszFunction) \
+ myLogBackdoorPrintf("\n!!Guest Assertion failed!!\n%s(%d) %s\n%s\n", pszFile, uLine, pszFunction, (pszExpr))
+# define RTAssertMsg2Weak myLogBackdoorPrintf
+# define RTTIMERLNX_LOG(a) myLogBackdoorPrintf a
+#else
+# define RTTIMERLNX_LOG(a) do { } while (0)
+#endif
+
+/**
+ * Sets the state.
+ *
+ * Unconditional atomic write (no compare); use rtTimerLnxCmpXchgState when
+ * the transition is only valid from a specific current state.
+ */
+DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState)
+{
+#ifdef DEBUG_HACKING
+ RTTIMERLNX_LOG(("set %d -> %d\n", *penmState, enmNewState));
+#endif
+ ASMAtomicWriteU32((uint32_t volatile *)penmState, enmNewState);
+}
+
+
+/**
+ * Sets the state if it has a certain value.
+ *
+ * @return true if xchg was done.
+ * @return false if xchg wasn't done.
+ *
+ * @remarks The DEBUG_HACKING variant also logs the old state and caller line.
+ */
+#ifdef DEBUG_HACKING
+#define rtTimerLnxCmpXchgState(penmState, enmNewState, enmCurState) rtTimerLnxCmpXchgStateDebug(penmState, enmNewState, enmCurState, __LINE__)
+static bool rtTimerLnxCmpXchgStateDebug(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
+ RTTIMERLNXSTATE enmCurState, uint32_t uLine)
+{
+ RTTIMERLNXSTATE enmOldState = enmCurState;
+ bool fRc = ASMAtomicCmpXchgExU32((uint32_t volatile *)penmState, enmNewState, enmCurState, (uint32_t *)&enmOldState);
+ RTTIMERLNX_LOG(("cxg %d -> %d - %d at %u\n", enmOldState, enmNewState, fRc, uLine));
+ return fRc;
+}
+#else
+DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
+ RTTIMERLNXSTATE enmCurState)
+{
+ return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState, enmNewState, enmCurState);
+}
+#endif
+
+
+/**
+ * Gets the state.
+ *
+ * Unordered atomic read; callers needing a stable value must re-check or
+ * use rtTimerLnxCmpXchgState.
+ */
+DECLINLINE(RTTIMERLNXSTATE) rtTimerLnxGetState(RTTIMERLNXSTATE volatile *penmState)
+{
+ return (RTTIMERLNXSTATE)ASMAtomicUoReadU32((uint32_t volatile *)penmState);
+}
+
+#ifdef RTTIMER_LINUX_WITH_HRTIMER
+
+/**
+ * Converts a nano second time stamp to ktime_t.
+ *
+ * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
+ *
+ * @returns ktime_t.
+ * @param cNanoSecs Nanoseconds.
+ */
+DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
+{
+ /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
+ return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
+}
+
+/**
+ * Converts ktime_t to a nano second time stamp.
+ *
+ * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
+ *
+ * @returns nano second time stamp.
+ * @param Kt ktime_t.
+ */
+DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
+{
+ /* Thin wrapper over the kernel's own conversion helper. */
+ return ktime_to_ns(Kt);
+}
+
+#endif /* RTTIMER_LINUX_WITH_HRTIMER */
+
+/**
+ * Converts a nano second interval to jiffies.
+ *
+ * Rounds up to the next jiffy and clamps at MAX_JIFFY_OFFSET.
+ *
+ * @returns Jiffies.
+ * @param cNanoSecs Nanoseconds.
+ */
+DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs)
+{
+ /* this can be made even better... */
+ if (cNanoSecs > (uint64_t)TICK_NSEC * MAX_JIFFY_OFFSET)
+ return MAX_JIFFY_OFFSET;
+# if ARCH_BITS == 32
+ /* Prefer 32-bit division when the value fits (cheaper on 32-bit hosts). */
+ if (RT_LIKELY(cNanoSecs <= UINT32_MAX))
+ return ((uint32_t)cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
+# endif
+ return (cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
+}
+
+
+/**
+ * Starts a sub-timer (RTTimerStart).
+ *
+ * @param pSubTimer The sub-timer to start.
+ * @param u64Now The current timestamp (RTTimeSystemNanoTS()).
+ * @param u64First The interval from u64Now to the first time the timer should fire.
+ * @param fPinned true = timer pinned to a specific CPU,
+ * false = timer can migrate between CPUs
+ * @param fHighRes Whether the user requested a high resolution timer or not.
+ */
+static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, uint64_t u64Now, uint64_t u64First,
+ bool fPinned, bool fHighRes)
+{
+ /*
+ * Calc when it should start firing.
+ */
+ uint64_t u64NextTS = u64Now + u64First;
+ if (!fHighRes)
+ pSubTimer->u.Std.u64NextTS = u64NextTS;
+ RTTIMERLNX_LOG(("startsubtimer %p\n", pSubTimer->pParent));
+
+ pSubTimer->iTick = 0;
+
+#ifdef RTTIMER_LINUX_WITH_HRTIMER
+ if (fHighRes)
+ hrtimer_start(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(u64NextTS),
+ fPinned ? HRTIMER_MODE_ABS_PINNED : HRTIMER_MODE_ABS);
+ else
+#endif
+ {
+ unsigned long cJiffies = !u64First ? 0 : rtTimerLnxNanoToJiffies(u64First);
+ pSubTimer->u.Std.ulNextJiffies = jiffies + cJiffies;
+ pSubTimer->u.Std.fFirstAfterChg = true;
+#ifdef CONFIG_SMP
+ if (fPinned)
+ {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ /* mod_timer_pinned was removed in 4.8; plain mod_timer keeps the
+    timer on the CPU it was initialized on there. */
+ mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
+# else
+ mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
+# endif
+ }
+ else
+#endif
+ mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
+ }
+
+ /* Be a bit careful here since we could be racing the callback. */
+ if (!rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_STARTING))
+ rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_MP_STARTING);
+}
+
+
/**
 * Stops a sub-timer (RTTimerStart and rtTimerLinuxMpEvent()).
 *
 * The caller has already changed the state, so we will not be in a callback
 * situation wrt to the calling thread.
 *
 * @param   pSubTimer   The sub-timer.
 * @param   fHighRes    Whether the user requested a high resolution timer or not.
 */
static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, bool fHighRes)
{
    RTTIMERLNX_LOG(("stopsubtimer %p %d\n", pSubTimer->pParent, fHighRes));
#ifdef RTTIMER_LINUX_WITH_HRTIMER
    if (fHighRes)
    {
        /* There is no equivalent to del_timer in the hrtimer API,
           hrtimer_cancel() == del_timer_sync(). Just like the WARN_ON in
           del_timer_sync() asserts, waiting for a timer callback to complete
           is deadlock prone, so don't do it. */
        int rc = hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
        if (rc < 0)
        {
            /* The callback is currently running (rc < 0): push the expiry as
               far into the future as possible and retry the cancellation once. */
            hrtimer_start(&pSubTimer->u.Hr.LnxTimer, ktime_set(KTIME_SEC_MAX, 0), HRTIMER_MODE_ABS);
            hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
        }
    }
    else
#endif
        del_timer(&pSubTimer->u.Std.LnxTimer);

    rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
}
+
+
/**
 * Used by RTTimerDestroy and rtTimerLnxCallbackDestroy to do the actual work.
 *
 * The timer must already be suspended (asserted below); this synchronously
 * cancels all sub-timers before freeing the structure.
 *
 * @param   pTimer      The timer in question.
 */
static void rtTimerLnxDestroyIt(PRTTIMER pTimer)
{
    RTSPINLOCK hSpinlock = pTimer->hSpinlock; /* saved: pTimer is freed before the spinlock is destroyed */
    RTCPUID iCpu;
    Assert(pTimer->fSuspended);
    RTTIMERLNX_LOG(("destroyit %p\n", pTimer));

    /*
     * Remove the MP notifications first because it'll reduce the risk of
     * us overtaking any MP event that might theoretically be racing us here.
     */
#ifdef CONFIG_SMP
    if (   pTimer->cCpus > 1
        && hSpinlock != NIL_RTSPINLOCK)
    {
        int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
        AssertRC(rc);
    }
#endif /* CONFIG_SMP */

    /*
     * Invalidate the handle.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);

    /*
     * Make sure all timers have stopped executing since we're stopping them in
     * an asynchronous manner up in rtTimerLnxStopSubTimer.
     */
    iCpu = pTimer->cCpus;
    while (iCpu-- > 0)
    {
#ifdef RTTIMER_LINUX_WITH_HRTIMER
        if (pTimer->fHighRes)
            hrtimer_cancel(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer);
        else
#endif
            del_timer_sync(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
    }

    /*
     * Finally, free the resources.
     */
    RTMemFreeEx(pTimer, RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[pTimer->cCpus]));
    if (hSpinlock != NIL_RTSPINLOCK)
        RTSpinlockDestroy(hSpinlock);
}
+
+
+/**
+ * Workqueue callback (no DECLCALLBACK!) for deferred destruction.
+ *
+ * @param pWork Pointer to the DtorWorkqueueItem member of our timer
+ * structure.
+ */
+static void rtTimerLnxDestroyDeferred(RTR0LNXWORKQUEUEITEM *pWork)
+{
+ PRTTIMER pTimer = RT_FROM_MEMBER(pWork, RTTIMER, DtorWorkqueueItem);
+ rtTimerLnxDestroyIt(pTimer);
+}
+
+
/**
 * Called when the timer was destroyed by the callback function.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer which we're handling, the state of this
 *                      will be RTTIMERLNXSTATE_CB_DESTROYING.
 */
static void rtTimerLnxCallbackDestroy(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
{
    /*
     * If it's an omni timer, the last dude does the destroying.
     */
    if (pTimer->cCpus > 1)
    {
        uint32_t iCpu = pTimer->cCpus;
        RTSpinlockAcquire(pTimer->hSpinlock);

        Assert(pSubTimer->enmState == RTTIMERLNXSTATE_CB_DESTROYING);
        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);

        /* If any sibling sub-timer is still not stopped, it will end up here
           too and the last one through performs the destruction. */
        while (iCpu-- > 0)
            if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
            {
                RTSpinlockRelease(pTimer->hSpinlock);
                return;
            }

        RTSpinlockRelease(pTimer->hSpinlock);
    }

    /*
     * Destroying a timer from the callback is unsafe since the callout code
     * might be touching the timer structure upon return (hrtimer does!). So,
     * we have to defer the actual destruction to the IRPT workqueue.
     */
    rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
}
+
+
+#ifdef CONFIG_SMP
/**
 * Deal with a sub-timer that has migrated.
 *
 * Drives the sub-timer state to STOPPED (or hands it off to the destroy
 * path); the caller must not re-arm the native timer afterwards.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer.
 */
static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
{
    RTTIMERLNXSTATE enmState;
    if (pTimer->cCpus > 1)
        RTSpinlockAcquire(pTimer->hSpinlock);

    do
    {
        enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_STOPPING:
            case RTTIMERLNXSTATE_MP_STOPPING:
                enmState = RTTIMERLNXSTATE_STOPPED;
                /* fall thru */
            case RTTIMERLNXSTATE_STOPPED:
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                /* fall thru - treat unexpected states like the active ones. */
            case RTTIMERLNXSTATE_STARTING:
            case RTTIMERLNXSTATE_MP_STARTING:
            case RTTIMERLNXSTATE_ACTIVE:
            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_STOPPING:
            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, enmState))
                    enmState = RTTIMERLNXSTATE_STOPPED;
                break;

            case RTTIMERLNXSTATE_CB_DESTROYING:
            {
                /* Must drop the lock before destroying; we won't return to the loop. */
                if (pTimer->cCpus > 1)
                    RTSpinlockRelease(pTimer->hSpinlock);

                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return;
            }
        }
    } while (enmState != RTTIMERLNXSTATE_STOPPED);

    if (pTimer->cCpus > 1)
        RTSpinlockRelease(pTimer->hSpinlock);
}
+#endif /* CONFIG_SMP */
+
+
/**
 * The slow path of rtTimerLnxChangeToCallbackState.
 *
 * Retries the ACTIVE -> CALLBACK transition, also accepting the transient
 * STARTING states; gives up if the sub-timer is stopped or already in a
 * callback state.
 *
 * @returns true if changed successfully, false if not.
 * @param   pSubTimer   The sub-timer.
 */
static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer)
{
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_ACTIVE:
            case RTTIMERLNXSTATE_STARTING:
            case RTTIMERLNXSTATE_MP_STARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, enmState))
                    return true;
                break;

            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_STOPPING:
            case RTTIMERLNXSTATE_CB_RESTARTING:
            case RTTIMERLNXSTATE_CB_DESTROYING:
                AssertMsgFailed(("%d\n", enmState));
                /* fall thru - refuse to enter the callback in these states. */
            default:
                return false;
        }
        ASMNopPause();
    }
}
+
+
+/**
+ * Tries to change the sub-timer state to 'callback'.
+ *
+ * @returns true if changed successfully, false if not.
+ * @param pSubTimer The sub-timer.
+ */
+DECLINLINE(bool) rtTimerLnxChangeToCallbackState(PRTTIMERLNXSUBTIMER pSubTimer)
+{
+ if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, RTTIMERLNXSTATE_ACTIVE)))
+ return true;
+ return rtTimerLnxChangeToCallbackStateSlow(pSubTimer);
+}
+
+
+#ifdef RTTIMER_LINUX_WITH_HRTIMER
/**
 * Timer callback function for high resolution timers.
 *
 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a
 *          one-shot or interval timer.
 * @param   pHrTimer    Pointer to the sub-timer structure.
 */
static enum hrtimer_restart rtTimerLinuxHrCallback(struct hrtimer *pHrTimer)
{
    PRTTIMERLNXSUBTIMER pSubTimer = RT_FROM_MEMBER(pHrTimer, RTTIMERLNXSUBTIMER, u.Hr.LnxTimer);
    PRTTIMER            pTimer    = pSubTimer->pParent;


    RTTIMERLNX_LOG(("hrcallback %p\n", pTimer));
    /* Enter the CALLBACK state; if that fails the timer is being stopped/destroyed. */
    if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
        return HRTIMER_NORESTART;

#ifdef CONFIG_SMP
    /*
     * Check for unwanted migration.
     */
    if (pTimer->fAllCpus || pTimer->fSpecificCpu)
    {
        RTCPUID idCpu = RTMpCpuId();
        if (RT_UNLIKELY(  pTimer->fAllCpus
                        ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
                        : pTimer->idCpu != idCpu))
        {
            rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
            return HRTIMER_NORESTART;
        }
    }
#endif

    if (pTimer->u64NanoInterval)
    {
        /*
         * Periodic timer, run it and update the native timer afterwards so
         * we can handle RTTimerStop and RTTimerChangeInterval from the
         * callback as well as a racing control thread.
         */
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        hrtimer_add_expires_ns(&pSubTimer->u.Hr.LnxTimer, ASMAtomicReadU64(&pTimer->u64NanoInterval));
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
            return HRTIMER_RESTART;
    }
    else
    {
        /*
         * One shot timer (no omni), stop it before dispatching it.
         * Allow RTTimerStart as well as RTTimerDestroy to be called from
         * the callback.
         */
        ASMAtomicWriteBool(&pTimer->fSuspended, true);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
            return HRTIMER_NORESTART;
    }

    /*
     * Some state change occurred while we were in the callback routine.
     * (The CmpXchg above failed, so the control code moved us to one of
     * the CB_* states; act on it here.)
     */
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_CB_DESTROYING:
                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return HRTIMER_NORESTART;

            case RTTIMERLNXSTATE_CB_STOPPING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
                    return HRTIMER_NORESTART;
                break;

            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
                {
                    /* Restart with a fresh tick count at the requested time. */
                    pSubTimer->iTick = 0;
                    hrtimer_set_expires(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(pSubTimer->uNsRestartAt));
                    return HRTIMER_RESTART;
                }
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                return HRTIMER_NORESTART;
        }
        ASMNopPause();
    }
}
+#endif /* RTTIMER_LINUX_WITH_HRTIMER */
+
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/**
 * Timer callback function for standard timers.
 *
 * @param   pLnxTimer   Pointer to the Linux timer structure.
 */
static void rtTimerLinuxStdCallback(struct timer_list *pLnxTimer)
{
    PRTTIMERLNXSUBTIMER pSubTimer = from_timer(pSubTimer, pLnxTimer, u.Std.LnxTimer);
#else
/**
 * Timer callback function for standard timers.
 *
 * @param   ulUser      Address of the sub-timer structure.
 */
static void rtTimerLinuxStdCallback(unsigned long ulUser)
{
    PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)ulUser;
#endif
    PRTTIMER pTimer = pSubTimer->pParent;

    RTTIMERLNX_LOG(("stdcallback %p\n", pTimer));
    /* Enter the CALLBACK state; if that fails the timer is being stopped/destroyed. */
    if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
        return;

#ifdef CONFIG_SMP
    /*
     * Check for unwanted migration.
     */
    if (pTimer->fAllCpus || pTimer->fSpecificCpu)
    {
        RTCPUID idCpu = RTMpCpuId();
        if (RT_UNLIKELY(  pTimer->fAllCpus
                        ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
                        : pTimer->idCpu != idCpu))
        {
            rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
            return;
        }
    }
#endif

    if (pTimer->u64NanoInterval)
    {
        /*
         * Interval timer, calculate the next timeout.
         *
         * The first time around, we'll re-adjust the u.Std.u64NextTS to
         * try prevent some jittering if we were started at a bad time.
         */
        const uint64_t iTick = ++pSubTimer->iTick;
        uint64_t       u64NanoInterval;
        unsigned long  cJiffies;
        unsigned long  flFlags;

        /* Snapshot the interval under the lock so RTTimerChangeInterval can't
           give us a torn read; also pick up the re-anchor request. */
        spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
        u64NanoInterval = pTimer->u64NanoInterval;
        cJiffies = pTimer->cJiffies;
        if (RT_UNLIKELY(pSubTimer->u.Std.fFirstAfterChg))
        {
            pSubTimer->u.Std.fFirstAfterChg = false;
            pSubTimer->u.Std.u64NextTS = RTTimeSystemNanoTS();
            pSubTimer->u.Std.ulNextJiffies = jiffies;
        }
        spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);

        pSubTimer->u.Std.u64NextTS += u64NanoInterval;
        if (cJiffies)
        {
            pSubTimer->u.Std.ulNextJiffies += cJiffies;
            /* Prevent overflows when the jiffies counter wraps around.
             * Special thanks to Ken Preslan for helping debugging! */
            while (time_before(pSubTimer->u.Std.ulNextJiffies, jiffies))
            {
                pSubTimer->u.Std.ulNextJiffies += cJiffies;
                pSubTimer->u.Std.u64NextTS += u64NanoInterval;
            }
        }
        else
        {
            /* Interval isn't a whole number of jiffies; derive the next jiffy
               deadline from the nanosecond deadline, skipping missed ticks. */
            const uint64_t u64NanoTS = RTTimeSystemNanoTS();
            while (pSubTimer->u.Std.u64NextTS < u64NanoTS)
                pSubTimer->u.Std.u64NextTS += u64NanoInterval;
            pSubTimer->u.Std.ulNextJiffies = jiffies + rtTimerLnxNanoToJiffies(pSubTimer->u.Std.u64NextTS - u64NanoTS);
        }

        /*
         * Run the timer and re-arm it unless the state changed.
         *
         * We must re-arm it afterwards as we're not in a position to undo this
         * operation if for instance someone stopped or destroyed us while we
         * were in the callback. (Linux takes care of any races here.)
         */
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
        {
#ifdef CONFIG_SMP
            if (pTimer->fSpecificCpu || pTimer->fAllCpus)
            {
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
                /* mod_timer_pinned() is not available on 4.8+. */
                mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# else
                mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# endif
            }
            else
#endif
                mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
            return;
        }
    }
    else
    {
        /*
         * One shot timer, stop it before dispatching it.
         * Allow RTTimerStart as well as RTTimerDestroy to be called from
         * the callback.
         */
        ASMAtomicWriteBool(&pTimer->fSuspended, true);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
            return;
    }

    /*
     * Some state change occurred while we were in the callback routine.
     * (The CmpXchg above failed; act on the CB_* state set by the control code.)
     */
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_CB_DESTROYING:
                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return;

            case RTTIMERLNXSTATE_CB_STOPPING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
                    return;
                break;

            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
                {
                    uint64_t      u64NanoTS;
                    uint64_t      u64NextTS;
                    unsigned long flFlags;

                    /* Reset the deadline bookkeeping under the lock and re-arm. */
                    spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
                    u64NextTS = pSubTimer->uNsRestartAt;
                    u64NanoTS = RTTimeSystemNanoTS();
                    pSubTimer->iTick = 0;
                    pSubTimer->u.Std.u64NextTS = u64NextTS;
                    pSubTimer->u.Std.fFirstAfterChg = true;
                    pSubTimer->u.Std.ulNextJiffies = u64NextTS > u64NanoTS
                                                   ? jiffies + rtTimerLnxNanoToJiffies(u64NextTS - u64NanoTS)
                                                   : jiffies;
                    spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);

#ifdef CONFIG_SMP
                    if (pTimer->fSpecificCpu || pTimer->fAllCpus)
                    {
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
                        /* mod_timer_pinned() is not available on 4.8+. */
                        mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# else
                        mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
# endif
                    }
                    else
#endif
                        mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
                    return;
                }
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                return;
        }
        ASMNopPause();
    }
}
+
+
+#ifdef CONFIG_SMP
+
+/**
+ * Per-cpu callback function (RTMpOnAll/RTMpOnSpecific).
+ *
+ * @param idCpu The current CPU.
+ * @param pvUser1 Pointer to the timer.
+ * @param pvUser2 Pointer to the argument structure.
+ */
+static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+ PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
+ PRTTIMER pTimer = (PRTTIMER)pvUser1;
+ Assert(idCpu < pTimer->cCpus);
+ rtTimerLnxStartSubTimer(&pTimer->aSubTimers[idCpu], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
+}
+
+
/**
 * Worker for RTTimerStart() that takes care of the ugly bits.
 *
 * @returns RTTimerStart() return value.
 * @param   pTimer      The timer.
 * @param   pArgs       The argument structure.
 */
static int rtTimerLnxOmniStart(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs)
{
    RTCPUID  iCpu;
    RTCPUSET OnlineSet;
    RTCPUSET OnlineSet2;
    int      rc2;

    /*
     * Prepare all the sub-timers for the startup and then flag the timer
     * as a whole as non-suspended, make sure we get them all before
     * clearing fSuspended as the MP handler will be waiting on this
     * should something happen while we're looping.
     */
    RTSpinlockAcquire(pTimer->hSpinlock);

    /* Just make it a omni timer restriction that no stop/start races are allowed. */
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
        {
            RTSpinlockRelease(pTimer->hSpinlock);
            return VERR_TIMER_BUSY;
        }

    /* Repeat the marking until the online set is stable, so no CPU comes
       online/offline between sampling the set and setting the states. */
    do
    {
        RTMpGetOnlineSet(&OnlineSet);
        for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        {
            Assert(pTimer->aSubTimers[iCpu].enmState != RTTIMERLNXSTATE_MP_STOPPING);
            rtTimerLnxSetState(&pTimer->aSubTimers[iCpu].enmState,
                               RTCpuSetIsMember(&OnlineSet, iCpu)
                               ? RTTIMERLNXSTATE_STARTING
                               : RTTIMERLNXSTATE_STOPPED);
        }
    } while (!RTCpuSetIsEqual(&OnlineSet, RTMpGetOnlineSet(&OnlineSet2)));

    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    RTSpinlockRelease(pTimer->hSpinlock);

    /*
     * Start them (can't find any exported function that allows me to
     * do this without the cross calls).
     */
    pArgs->u64Now = RTTimeSystemNanoTS();
    rc2 = RTMpOnAll(rtTimerLnxStartAllOnCpu, pTimer, pArgs);
    AssertRC(rc2); /* screw this if it fails. */

    /*
     * Reset the sub-timers who didn't start up (ALL CPUs case).
     */
    RTSpinlockAcquire(pTimer->hSpinlock);

    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_STARTING))
        {
            /** @todo very odd case for a rainy day. Cpus that temporarily went offline while
             * we were between calls needs to nudged as the MP handler will ignore events for
             * them because of the STARTING state. This is an extremely unlikely case - not that
             * that means anything in my experience... ;-) */
            RTTIMERLNX_LOG(("what!? iCpu=%u -> didn't start\n", iCpu));
        }

    RTSpinlockRelease(pTimer->hSpinlock);

    return VINF_SUCCESS;
}
+
+
/**
 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
 *
 * @returns true if there was any active callbacks, false if not.
 * @param   pTimer          The timer (valid).
 * @param   fForDestroy     Whether this is for RTTimerDestroy or not.
 */
static bool rtTimerLnxOmniStop(PRTTIMER pTimer, bool fForDestroy)
{
    bool            fActiveCallbacks = false;
    RTCPUID         iCpu;
    RTTIMERLNXSTATE enmState;


    /*
     * Mark the timer as suspended and flag all timers as stopping, except
     * for those being stopped by an MP event.
     */
    RTSpinlockAcquire(pTimer->hSpinlock);

    ASMAtomicWriteBool(&pTimer->fSuspended, true);
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
    {
        for (;;)
        {
            enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
            if (   enmState == RTTIMERLNXSTATE_STOPPED
                || enmState == RTTIMERLNXSTATE_MP_STOPPING)
                break;
            if (   enmState == RTTIMERLNXSTATE_CALLBACK
                || enmState == RTTIMERLNXSTATE_CB_STOPPING
                || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
            {
                /* Sub-timer is executing its callback; ask the callback itself
                   to stop (or destroy) when it is done. */
                Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState,
                                           !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
                                           enmState))
                {
                    fActiveCallbacks = true;
                    break;
                }
            }
            else
            {
                Assert(enmState == RTTIMERLNXSTATE_ACTIVE);
                if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPING, enmState))
                    break;
            }
            ASMNopPause();
        }
    }

    RTSpinlockRelease(pTimer->hSpinlock);

    /*
     * Do the actual stopping. Fortunately, this doesn't require any IPIs.
     * Unfortunately it cannot be done synchronously.
     */
    for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
        if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) == RTTIMERLNXSTATE_STOPPING)
            rtTimerLnxStopSubTimer(&pTimer->aSubTimers[iCpu], pTimer->fHighRes);

    return fActiveCallbacks;
}
+
+
/**
 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
 * to start a sub-timer on a cpu that just have come online.
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    RTSPINLOCK hSpinlock;
    Assert(idCpu < pTimer->cCpus);

    /*
     * We have to be kind of careful here as we might be racing RTTimerStop
     * (and/or RTTimerDestroy), thus the paranoia.
     */
    hSpinlock = pTimer->hSpinlock;
    if (   hSpinlock != NIL_RTSPINLOCK
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTSpinlockAcquire(hSpinlock);

        /* Re-check under the lock: the magic and suspend flag may have changed
           since the unsynchronized checks above. */
        if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
            && pTimer->u32Magic == RTTIMER_MAGIC)
        {
            /* We're sane and the timer is not suspended yet. */
            PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
            if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
        }

        RTSpinlockRelease(hSpinlock);
    }
}
+
+
/**
 * MP event notification callback.
 *
 * Starts the per-cpu sub-timer when a CPU comes online and stops it again
 * when the CPU goes offline (Linux would otherwise migrate the timer).
 *
 * @param   enmEvent    The event.
 * @param   idCpu       The cpu it applies to.
 * @param   pvUser      The timer.
 */
static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    PRTTIMER            pTimer    = (PRTTIMER)pvUser;
    PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
    RTSPINLOCK          hSpinlock;

    Assert(idCpu < pTimer->cCpus);

    /*
     * Some initial paranoia.
     */
    if (pTimer->u32Magic != RTTIMER_MAGIC)
        return;
    hSpinlock = pTimer->hSpinlock;
    if (hSpinlock == NIL_RTSPINLOCK)
        return;

    RTSpinlockAcquire(hSpinlock);

    /* Is it active? */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        switch (enmEvent)
        {
            /*
             * Try do it without leaving the spin lock, but if we have to, retake it
             * when we're on the right cpu.
             */
            case RTMPEVENT_ONLINE:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                {
                    RTTIMERLINUXSTARTONCPUARGS Args;
                    Args.u64Now = RTTimeSystemNanoTS();
                    Args.u64First = 0;

                    if (RTMpCpuId() == idCpu)
                        rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First, true /*fPinned*/, pTimer->fHighRes);
                    else
                    {
                        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
                        RTSpinlockRelease(hSpinlock);

                        /* Cross-call to the new CPU; rtTimerLinuxMpStartOnCpu redoes
                           the sanity checks since we dropped the lock. */
                        RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
                        return; /* we've left the spinlock */
                    }
                }
                break;

            /*
             * The CPU is (going) offline, make sure the sub-timer is stopped.
             *
             * Linux will migrate it to a different CPU, but we don't want this. The
             * timer function is checking for this.
             */
            case RTMPEVENT_OFFLINE:
            {
                RTTIMERLNXSTATE enmState;
                while (   (enmState = rtTimerLnxGetState(&pSubTimer->enmState)) == RTTIMERLNXSTATE_ACTIVE
                       || enmState == RTTIMERLNXSTATE_CALLBACK
                       || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
                {
                    if (enmState == RTTIMERLNXSTATE_ACTIVE)
                    {
                        if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
                        {
                            RTSpinlockRelease(hSpinlock);

                            rtTimerLnxStopSubTimer(pSubTimer, pTimer->fHighRes);
                            return; /* we've left the spinlock */
                        }
                    }
                    /* In a callback: have the callback stop itself when it's done. */
                    else if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CB_STOPPING, enmState))
                        break;

                    /* State not stable, try again. */
                    ASMNopPause();
                }
                break;
            }
        }
    }

    RTSpinlockRelease(hSpinlock);
}
+
+#endif /* CONFIG_SMP */
+
+
+/**
+ * Callback function use by RTTimerStart via RTMpOnSpecific to start a timer
+ * running on a specific CPU.
+ *
+ * @param idCpu The current CPU.
+ * @param pvUser1 Pointer to the timer.
+ * @param pvUser2 Pointer to the argument structure.
+ */
+static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+ PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
+ PRTTIMER pTimer = (PRTTIMER)pvUser1;
+ RT_NOREF_PV(idCpu);
+ rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
+}
+
+
+RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
+{
+ RTTIMERLINUXSTARTONCPUARGS Args;
+ int rc2;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+ if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
+ return VERR_TIMER_ACTIVE;
+ RTTIMERLNX_LOG(("start %p cCpus=%d\n", pTimer, pTimer->cCpus));
+
+ Args.u64First = u64First;
+#ifdef CONFIG_SMP
+ /*
+ * Omni timer?
+ */
+ if (pTimer->fAllCpus)
+ {
+ rc2 = rtTimerLnxOmniStart(pTimer, &Args);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return rc2;
+ }
+#endif
+
+ /*
+ * Simple timer - Pretty straight forward if it wasn't for restarting.
+ */
+ Args.u64Now = RTTimeSystemNanoTS();
+ ASMAtomicWriteU64(&pTimer->aSubTimers[0].uNsRestartAt, Args.u64Now + u64First);
+ for (;;)
+ {
+ RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
+ switch (enmState)
+ {
+ case RTTIMERLNXSTATE_STOPPED:
+ if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
+ {
+ ASMAtomicWriteBool(&pTimer->fSuspended, false);
+ if (!pTimer->fSpecificCpu)
+ rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First,
+ false /*fPinned*/, pTimer->fHighRes);
+ else
+ {
+ rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
+ if (RT_FAILURE(rc2))
+ {
+ /* Suspend it, the cpu id is probably invalid or offline. */
+ ASMAtomicWriteBool(&pTimer->fSuspended, true);
+ rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
+ return rc2;
+ }
+ }
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+ break;
+
+ case RTTIMERLNXSTATE_CALLBACK:
+ case RTTIMERLNXSTATE_CB_STOPPING:
+ if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_CB_RESTARTING, enmState))
+ {
+ ASMAtomicWriteBool(&pTimer->fSuspended, false);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+ break;
+
+ default:
+ AssertMsgFailed(("%d\n", enmState));
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VERR_INTERNAL_ERROR_4;
+ }
+ ASMNopPause();
+ }
+}
+RT_EXPORT_SYMBOL(RTTimerStart);
+
+
+/**
+ * Common worker for RTTimerStop and RTTimerDestroy.
+ *
+ * @returns true if there was any active callbacks, false if not.
+ * @param pTimer The timer to stop.
+ * @param fForDestroy Whether it's RTTimerDestroy calling or not.
+ */
+static bool rtTimerLnxStop(PRTTIMER pTimer, bool fForDestroy)
+{
+ RTTIMERLNX_LOG(("lnxstop %p %d\n", pTimer, fForDestroy));
+#ifdef CONFIG_SMP
+ /*
+ * Omni timer?
+ */
+ if (pTimer->fAllCpus)
+ return rtTimerLnxOmniStop(pTimer, fForDestroy);
+#endif
+
+ /*
+ * Simple timer.
+ */
+ ASMAtomicWriteBool(&pTimer->fSuspended, true);
+ for (;;)
+ {
+ RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
+ switch (enmState)
+ {
+ case RTTIMERLNXSTATE_ACTIVE:
+ if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING, RTTIMERLNXSTATE_ACTIVE))
+ {
+ rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0], pTimer->fHighRes);
+ return false;
+ }
+ break;
+
+ case RTTIMERLNXSTATE_CALLBACK:
+ case RTTIMERLNXSTATE_CB_RESTARTING:
+ case RTTIMERLNXSTATE_CB_STOPPING:
+ Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
+ if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState,
+ !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
+ enmState))
+ return true;
+ break;
+
+ case RTTIMERLNXSTATE_STOPPED:
+ return VINF_SUCCESS;
+
+ case RTTIMERLNXSTATE_CB_DESTROYING:
+ AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
+ return true;
+
+ default:
+ case RTTIMERLNXSTATE_STARTING:
+ case RTTIMERLNXSTATE_MP_STARTING:
+ case RTTIMERLNXSTATE_STOPPING:
+ case RTTIMERLNXSTATE_MP_STOPPING:
+ AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
+ return false;
+ }
+
+ /* State not stable, try again. */
+ ASMNopPause();
+ }
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+ /*
+ * Validate.
+ */
+ IPRT_LINUX_SAVE_EFL_AC();
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+ RTTIMERLNX_LOG(("stop %p\n", pTimer));
+
+ if (ASMAtomicUoReadBool(&pTimer->fSuspended))
+ return VERR_TIMER_SUSPENDED;
+
+ rtTimerLnxStop(pTimer, false /*fForDestroy*/);
+
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTTimerStop);
+
+
+RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
+{
+ unsigned long cJiffies;
+ unsigned long flFlags;
+ IPRT_LINUX_SAVE_EFL_AC();
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+ AssertReturn(u64NanoInterval, VERR_INVALID_PARAMETER);
+ AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
+ AssertReturn(pTimer->u64NanoInterval, VERR_INVALID_STATE);
+ RTTIMERLNX_LOG(("change %p %llu\n", pTimer, u64NanoInterval));
+
+#ifdef RTTIMER_LINUX_WITH_HRTIMER
+ /*
+ * For the high resolution timers it is easy since we don't care so much
+ * about when it is applied to the sub-timers.
+ */
+ if (pTimer->fHighRes)
+ {
+ ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+ }
+#endif
+
+ /*
+ * Standard timers have a bit more complicated way of calculating
+ * their interval and such. So, forget omni timers for now.
+ */
+ if (pTimer->cCpus > 1)
+ return VERR_NOT_SUPPORTED;
+
+ cJiffies = u64NanoInterval / RTTimerGetSystemGranularity();
+ if (cJiffies * RTTimerGetSystemGranularity() != u64NanoInterval)
+ cJiffies = 0;
+
+ spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
+ pTimer->aSubTimers[0].u.Std.fFirstAfterChg = true;
+ pTimer->cJiffies = cJiffies;
+ ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
+ spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
+ IPRT_LINUX_RESTORE_EFL_AC();
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTTimerChangeInterval);
+
+
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+    bool fCanDestroy;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    /*
+     * Validate. It's ok to pass NULL pointer.
+     */
+    if (pTimer == /*NIL_RTTIMER*/ NULL)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+    RTTIMERLNX_LOG(("destroy %p\n", pTimer));
+/** @todo We should invalidate the magic here! */
+
+    /*
+     * Stop the timer if it's still active, then destroy it if we can.
+     */
+    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
+        fCanDestroy = rtTimerLnxStop(pTimer, true /*fForDestroy*/);
+    else
+    {
+        /* Suspended: scan all sub-timers for callbacks that are still in
+           flight.  The spinlock (omni timers only) keeps the scan consistent
+           with concurrent start/stop/mp-event processing. */
+        uint32_t iCpu = pTimer->cCpus;
+        if (pTimer->cCpus > 1)
+            RTSpinlockAcquire(pTimer->hSpinlock);
+
+        fCanDestroy = true;
+        while (iCpu-- > 0)
+        {
+            for (;;)
+            {
+                RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
+                switch (enmState)
+                {
+                    case RTTIMERLNXSTATE_CALLBACK:
+                    case RTTIMERLNXSTATE_CB_RESTARTING:
+                    case RTTIMERLNXSTATE_CB_STOPPING:
+                        /* A callback is executing on this CPU: hand destruction
+                           over to it by moving the state to CB_DESTROYING.
+                           'continue' retries the for(;;) on a cmpxchg race. */
+                        if (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_CB_DESTROYING, enmState))
+                            continue;
+                        fCanDestroy = false;
+                        break;
+
+                    case RTTIMERLNXSTATE_CB_DESTROYING:
+                        /* Should not already be marked for destruction here. */
+                        AssertMsgFailed(("%d\n", enmState));
+                        fCanDestroy = false;
+                        break;
+                    default:
+                        break;
+                }
+                break;
+            }
+        }
+
+        if (pTimer->cCpus > 1)
+            RTSpinlockRelease(pTimer->hSpinlock);
+    }
+
+    if (fCanDestroy)
+    {
+        /* For paranoid reasons, defer actually destroying the semaphore when
+           in atomic or interrupt context. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
+        if (in_atomic() || in_interrupt())
+#else
+        if (in_interrupt())
+#endif
+            rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
+        else
+            rtTimerLnxDestroyIt(pTimer);
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTTimerDestroy);
+
+
+/**
+ * Creates a timer.
+ *
+ * @returns IPRT status code.
+ * @param   ppTimer         Where to store the timer handle; set to NULL before
+ *                          any validation so it is defined on every failure path.
+ * @param   u64NanoInterval The interval in nanoseconds, 0 for one-shot timers
+ *                          (one-shot is not implemented for all-cpu timers).
+ * @param   fFlags          RTTIMER_FLAGS_XXX.
+ * @param   pfnTimer        Callback to invoke on each tick.
+ * @param   pvUser          User argument passed to the callback.
+ */
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+    PRTTIMER pTimer;
+    RTCPUID iCpu;
+    unsigned cCpus;
+    int rc;
+    IPRT_LINUX_SAVE_EFL_AC();
+
+    rtR0LnxWorkqueueFlush(); /* for 2.4 */
+    *ppTimer = NULL;
+
+    /*
+     * Validate flags.
+     */
+    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_INVALID_PARAMETER;
+    }
+    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VERR_CPU_NOT_FOUND;
+    }
+
+    /*
+     * Allocate the timer handler.
+     */
+    cCpus = 1;
+#ifdef CONFIG_SMP
+    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
+    {
+        cCpus = RTMpGetMaxCpuId() + 1;
+        Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
+        AssertReturnStmt(u64NanoInterval, IPRT_LINUX_RESTORE_EFL_AC(), VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
+    }
+#endif
+
+    rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
+                      RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE, (void **)&pTimer);
+    if (RT_FAILURE(rc))
+    {
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return rc;
+    }
+
+    /*
+     * Initialize it.
+     */
+    pTimer->u32Magic   = RTTIMER_MAGIC;
+    pTimer->hSpinlock  = NIL_RTSPINLOCK;
+    pTimer->fSuspended = true;
+    pTimer->fHighRes   = !!(fFlags & RTTIMER_FLAGS_HIGH_RES);
+#ifdef CONFIG_SMP
+    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
+    pTimer->fAllCpus     = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
+    pTimer->idCpu        = pTimer->fSpecificCpu
+                         ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)
+                         : NIL_RTCPUID;
+#else
+    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
+    pTimer->idCpu        = RTMpCpuId();
+#endif
+    pTimer->cCpus           = cCpus;
+    pTimer->pfnTimer        = pfnTimer;
+    pTimer->pvUser          = pvUser;
+    pTimer->u64NanoInterval = u64NanoInterval;
+    /* cJiffies is only usable when the interval is an exact multiple of the
+       tick period; otherwise 0 forces nanosecond-based rescheduling. */
+    pTimer->cJiffies        = u64NanoInterval / RTTimerGetSystemGranularity();
+    if (pTimer->cJiffies * RTTimerGetSystemGranularity() != u64NanoInterval)
+        pTimer->cJiffies    = 0;
+    spin_lock_init(&pTimer->ChgIntLock);
+
+    for (iCpu = 0; iCpu < cCpus; iCpu++)
+    {
+#ifdef RTTIMER_LINUX_WITH_HRTIMER
+        if (pTimer->fHighRes)
+        {
+            hrtimer_init(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+            pTimer->aSubTimers[iCpu].u.Hr.LnxTimer.function = rtTimerLinuxHrCallback;
+        }
+        else
+#endif
+        {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+            timer_setup(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer, rtTimerLinuxStdCallback, TIMER_PINNED);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+            init_timer_pinned(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
+#else
+            init_timer(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.data     = (unsigned long)&pTimer->aSubTimers[iCpu];
+            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.function = rtTimerLinuxStdCallback;
+#endif
+            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.expires  = jiffies;
+            pTimer->aSubTimers[iCpu].u.Std.u64NextTS         = 0;
+        }
+        pTimer->aSubTimers[iCpu].iTick    = 0;
+        pTimer->aSubTimers[iCpu].pParent  = pTimer;
+        pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
+    }
+
+#ifdef CONFIG_SMP
+    /*
+     * If this is running on ALL cpus, we'll have to register a callback
+     * for MP events (so timers can be started/stopped on cpus going
+     * online/offline). We also create the spinlock for synchronizing
+     * stop/start/mp-event.
+     */
+    if (cCpus > 1)
+    {
+        /* Reuse the outer 'rc' here; the original declared a shadowing inner
+           'int rc' which -Wshadow rightly complains about. */
+        rc = RTSpinlockCreate(&pTimer->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerLnx");
+        if (RT_SUCCESS(rc))
+            rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
+        else
+            pTimer->hSpinlock = NIL_RTSPINLOCK;
+        if (RT_FAILURE(rc))
+        {
+            RTTimerDestroy(pTimer);
+            IPRT_LINUX_RESTORE_EFL_AC();
+            return rc;
+        }
+    }
+#endif /* CONFIG_SMP */
+
+    RTTIMERLNX_LOG(("create %p hires=%d fFlags=%#x cCpus=%u\n", pTimer, pTimer->fHighRes, fFlags, cCpus));
+    *ppTimer = pTimer;
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTTimerCreateEx);
+
+
+RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
+{
+#if 0 /** @todo Not sure if this is what we want or not... Add new API for
+       *        querying the resolution of the high res timers? */
+    struct timespec Ts;
+    int rc;
+    IPRT_LINUX_SAVE_EFL_AC();
+    rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
+    IPRT_LINUX_RESTORE_EFL_AC();
+    if (!rc)
+    {
+        Assert(!Ts.tv_sec);
+        return Ts.tv_nsec;
+    }
+#endif
+    /* Report one scheduler tick (jiffy), i.e. the standard timer period. */
+    return RT_NS_1SEC / HZ; /* ns */
+}
+RT_EXPORT_SYMBOL(RTTimerGetSystemGranularity);
+
+
+RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
+{
+    /*
+     * Changing the system timer granularity is not supported by this
+     * implementation, so no request is ever granted.
+     */
+    RT_NOREF_PV(u32Request);
+    RT_NOREF_PV(*pu32Granted); /* NOTE(review): dereferences pu32Granted like the original did; confirm callers never pass NULL. */
+    return VERR_NOT_SUPPORTED;
+}
+RT_EXPORT_SYMBOL(RTTimerRequestSystemGranularity);
+
+
+RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
+{
+    /*
+     * Granularity requests are never granted (see
+     * RTTimerRequestSystemGranularity), so releasing is equally unsupported.
+     */
+    RT_NOREF_PV(u32Granted);
+    return VERR_NOT_SUPPORTED;
+}
+RT_EXPORT_SYMBOL(RTTimerReleaseSystemGranularity);
+
+
+RTDECL(bool) RTTimerCanDoHighResolution(void)
+{
+    /* High resolution support is a compile-time property: it depends solely
+       on whether the hrtimer interface was available when building. */
+#ifdef RTTIMER_LINUX_WITH_HRTIMER
+    bool const fCanDoHighRes = true;
+#else
+    bool const fCanDoHighRes = false;
+#endif
+    return fCanDoHighRes;
+}
+RT_EXPORT_SYMBOL(RTTimerCanDoHighResolution);
+
diff --git a/src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h b/src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h
new file mode 100644
index 00000000..4ef87a59
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h
@@ -0,0 +1,292 @@
+/* $Id: waitqueue-r0drv-linux.h $ */
+/** @file
+ * IPRT - Linux Ring-0 Driver Helpers for Abstracting Wait Queues,
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_linux_waitqueue_r0drv_linux_h
+#define IPRT_INCLUDED_SRC_r0drv_linux_waitqueue_r0drv_linux_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "the-linux-kernel.h"
+
+#include <iprt/asm-math.h>
+#include <iprt/err.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+
+/** The resolution (nanoseconds) specified when using
+ * schedule_hrtimeout_range. */
+#define RTR0SEMLNXWAIT_RESOLUTION 50000
+
+
+/**
+ * Kernel mode Linux wait state structure.
+ */
+typedef struct RTR0SEMLNXWAIT
+{
+    /** The wait queue entry.
+     * The type was renamed from wait_queue_t in Linux 4.13 (and the rename
+     * was backported by openSUSE), hence the version check. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 14) /* 4.13.0 and openSUSE */
+    wait_queue_entry_t WaitQE;
+#else
+    wait_queue_t WaitQE;
+#endif
+    /** The absolute timeout given as nano seconds since the start of the
+     *  monotonic clock. */
+    uint64_t uNsAbsTimeout;
+    /** The timeout in nano seconds relative to the start of the wait. */
+    uint64_t cNsRelTimeout;
+    /** The native timeout value (representation depends on fHighRes). */
+    union
+    {
+#ifdef IPRT_LINUX_HAS_HRTIMER
+        /** The timeout when fHighRes is true. Absolute, so no updating. */
+        ktime_t KtTimeout;
+#endif
+        /** The timeout when fHighRes is false. Updated after waiting. */
+        long lTimeout;
+    } u;
+    /** Set if we use high resolution timeouts. */
+    bool fHighRes;
+    /** Set if it's an indefinite wait. */
+    bool fIndefinite;
+    /** Set if we've already timed out.
+     * Set by rtR0SemLnxWaitDoIt and read by rtR0SemLnxWaitHasTimedOut. */
+    bool fTimedOut;
+    /** TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE. */
+    int iWaitState;
+    /** The wait queue. */
+    wait_queue_head_t *pWaitQueue;
+} RTR0SEMLNXWAIT;
+/** Pointer to a linux wait state. */
+typedef RTR0SEMLNXWAIT *PRTR0SEMLNXWAIT;
+
+
+/**
+ * Initializes a wait.
+ *
+ * The caller MUST check the wait condition BEFORE calling this function or the
+ * timeout logic will be flawed.
+ *
+ * @returns VINF_SUCCESS or VERR_TIMEOUT.
+ * @param pWait The wait structure.
+ * @param fFlags The wait flags.
+ * @param uTimeout The timeout.
+ * @param pWaitQueue The wait queue head.
+ */
+DECLINLINE(int) rtR0SemLnxWaitInit(PRTR0SEMLNXWAIT pWait, uint32_t fFlags, uint64_t uTimeout,
+                                   wait_queue_head_t *pWaitQueue)
+{
+    /*
+     * Process the flags and timeout.
+     */
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+/** @todo optimize: millisecs -> nanosecs -> millisec -> jiffies */
+        /* Convert milliseconds to nanoseconds, clamping to UINT64_MAX
+           (which below converts into an indefinite wait).
+           NOTE(review): the guard compares against (UINT64_MAX / RT_US_1SEC)
+           * RT_US_1SEC rather than UINT64_MAX / RT_US_1SEC, so extremely
+           large (impractical) values could still wrap — confirm intent. */
+        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+            uTimeout = uTimeout < UINT64_MAX / RT_US_1SEC * RT_US_1SEC
+                     ? uTimeout * RT_US_1SEC
+                     : UINT64_MAX;
+        if (uTimeout == UINT64_MAX)
+            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+        else
+        {
+            uint64_t u64Now;
+            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+            {
+                if (uTimeout == 0)
+                    return VERR_TIMEOUT;
+
+                u64Now = RTTimeSystemNanoTS();
+                pWait->cNsRelTimeout = uTimeout;
+                pWait->uNsAbsTimeout = u64Now + uTimeout;
+                if (pWait->uNsAbsTimeout < u64Now) /* overflow */
+                    fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+            }
+            else
+            {
+                /* Absolute deadline: already in the past means instant timeout. */
+                u64Now = RTTimeSystemNanoTS();
+                if (u64Now >= uTimeout)
+                    return VERR_TIMEOUT;
+
+                pWait->cNsRelTimeout = uTimeout - u64Now;
+                pWait->uNsAbsTimeout = uTimeout;
+            }
+        }
+    }
+
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+        pWait->fIndefinite = false;
+#ifdef IPRT_LINUX_HAS_HRTIMER
+        /* Use the high resolution path when nanosecond/absolute precision was
+           requested or the timeout is shorter than four standard ticks. */
+        if (   (fFlags & (RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_ABSOLUTE))
+            || pWait->cNsRelTimeout < RT_NS_1SEC / HZ * 4)
+        {
+            pWait->fHighRes = true;
+# if BITS_PER_LONG < 64
+            /* Guard against ktime_t overflow on 32-bit hosts. */
+            if (   KTIME_SEC_MAX <= LONG_MAX
+                && pWait->uNsAbsTimeout >= KTIME_SEC_MAX * RT_NS_1SEC_64 + (RT_NS_1SEC - 1))
+                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+            else
+# endif
+                pWait->u.KtTimeout = ns_to_ktime(pWait->uNsAbsTimeout);
+        }
+        else
+#endif
+        {
+            /* Standard path: convert the relative timeout to jiffies. */
+            uint64_t cJiffies = ASMMultU64ByU32DivByU32(pWait->cNsRelTimeout, HZ, RT_NS_1SEC);
+            if (cJiffies >= MAX_JIFFY_OFFSET)
+                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+            else
+            {
+                pWait->u.lTimeout = (long)cJiffies;
+                pWait->fHighRes = false;
+            }
+        }
+    }
+
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+    {
+        pWait->fIndefinite      = true;
+        pWait->fHighRes         = false;
+        pWait->uNsAbsTimeout    = UINT64_MAX;
+        pWait->cNsRelTimeout    = UINT64_MAX;
+        pWait->u.lTimeout       = LONG_MAX;
+    }
+
+    pWait->fTimedOut = false;
+
+    /*
+     * Initialize the wait queue related bits.
+     */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 39)
+    init_wait((&pWait->WaitQE));
+#else
+    RT_ZERO(pWait->WaitQE);
+    init_waitqueue_entry((&pWait->WaitQE), current);
+#endif
+    pWait->pWaitQueue = pWaitQueue;
+    pWait->iWaitState = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE
+                      ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prepares the next wait.
+ *
+ * This must be called before rtR0SemLnxWaitDoIt, and the caller should check
+ * the exit conditions in-between the two calls.
+ *
+ * @param pWait The wait structure.
+ */
+DECLINLINE(void) rtR0SemLnxWaitPrepare(PRTR0SEMLNXWAIT pWait)
+{
+    /* Adds the entry to the queue and sets the task state (iWaitState).
+       Make everything thru schedule*() atomic scheduling wise. (Is this correct?) */
+    prepare_to_wait(pWait->pWaitQueue, &pWait->WaitQE, pWait->iWaitState);
+}
+
+
+/**
+ * Do the actual wait.
+ *
+ * @param pWait The wait structure.
+ */
+DECLINLINE(void) rtR0SemLnxWaitDoIt(PRTR0SEMLNXWAIT pWait)
+{
+    if (pWait->fIndefinite)
+        schedule();
+#ifdef IPRT_LINUX_HAS_HRTIMER
+    else if (pWait->fHighRes)
+    {
+        /* schedule_hrtimeout_range returns 0 when the (absolute) timeout expired. */
+        int rc = schedule_hrtimeout_range(&pWait->u.KtTimeout, HRTIMER_MODE_ABS, RTR0SEMLNXWAIT_RESOLUTION);
+        if (!rc)
+            pWait->fTimedOut = true;
+    }
+#endif
+    else
+    {
+        /* schedule_timeout returns the remaining jiffies; store it back so a
+           subsequent wait round continues with the leftover budget. */
+        pWait->u.lTimeout = schedule_timeout(pWait->u.lTimeout);
+        if (pWait->u.lTimeout <= 0)
+            pWait->fTimedOut = true;
+    }
+    after_wait((&pWait->WaitQE));
+}
+
+
+/**
+ * Checks if a linux wait was interrupted.
+ *
+ * @returns true / false
+ * @param pWait The wait structure.
+ * @remarks This shall be called before the first rtR0SemLnxWaitDoIt().
+ */
+DECLINLINE(bool) rtR0SemLnxWaitWasInterrupted(PRTR0SEMLNXWAIT pWait)
+{
+    /* Only interruptible waits can be interrupted by a signal. */
+    if (pWait->iWaitState != TASK_INTERRUPTIBLE)
+        return false;
+    return signal_pending(current) != 0;
+}
+
+
+/**
+ * Checks if a linux wait has timed out.
+ *
+ * @returns true / false
+ * @param pWait The wait structure.
+ */
+DECLINLINE(bool) rtR0SemLnxWaitHasTimedOut(PRTR0SEMLNXWAIT pWait)
+{
+    /* Flag is set by rtR0SemLnxWaitDoIt when the timeout expired. */
+    return pWait->fTimedOut;
+}
+
+
+/**
+ * Deletes a linux wait.
+ *
+ * @param pWait The wait structure.
+ */
+DECLINLINE(void) rtR0SemLnxWaitDelete(PRTR0SEMLNXWAIT pWait)
+{
+    /* Removes the entry from the queue and restores the task state. */
+    finish_wait(pWait->pWaitQueue, &pWait->WaitQE);
+}
+
+
+/**
+ * Gets the max resolution of the timeout machinery.
+ *
+ * @returns Resolution specified in nanoseconds.
+ */
+DECLINLINE(uint32_t) rtR0SemLnxWaitGetResolution(void)
+{
+#ifdef IPRT_LINUX_HAS_HRTIMER
+    /* The slack passed to schedule_hrtimeout_range. */
+    return RTR0SEMLNXWAIT_RESOLUTION;
+#else
+    /* One jiffy - the standard timer tick. */
+    return RT_NS_1SEC / HZ; /* ns */
+#endif
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_linux_waitqueue_r0drv_linux_h */
+
diff --git a/src/VBox/Runtime/r0drv/memobj-r0drv.cpp b/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
new file mode 100644
index 00000000..f5f61a39
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
@@ -0,0 +1,808 @@
+/* $Id: memobj-r0drv.cpp $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, Common Code.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_DEFAULT /// @todo RTLOGGROUP_MEM
+#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
+#include <iprt/memobj.h>
+#include "internal/iprt.h"
+
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mp.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include <iprt/thread.h>
+
+#include "internal/memobj.h"
+
+
+/**
+ * Internal function for allocating a new memory object.
+ *
+ * @returns The allocated and initialized handle.
+ * @param cbSelf The size of the memory object handle. 0 mean default size.
+ * @param enmType The memory object type.
+ * @param pv The memory object mapping.
+ * @param cb The size of the memory object.
+ */
+DECLHIDDEN(PRTR0MEMOBJINTERNAL) rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb)
+{
+    PRTR0MEMOBJINTERNAL pNew;
+
+    /* Apply the default handle size and sanity check the request. */
+    if (!cbSelf)
+        cbSelf = sizeof(*pNew);
+    Assert(cbSelf >= sizeof(*pNew));
+    Assert(cbSelf == (uint32_t)cbSelf);
+    AssertMsg(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, ("%#zx\n", cb));
+
+    /* Allocate the zero-initialized handle; bail out on allocation failure. */
+    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
+    if (!pNew)
+        return NULL;
+
+    /* Fill in the common fields. */
+    pNew->u32Magic = RTR0MEMOBJ_MAGIC;
+    pNew->cbSelf   = (uint32_t)cbSelf;
+    pNew->enmType  = enmType;
+    pNew->fFlags   = 0;
+    pNew->cb       = cb;
+    pNew->pv       = pv;
+    return pNew;
+}
+
+
+/**
+ * Deletes an incomplete memory object.
+ *
+ * This is for cleaning up after failures during object creation.
+ *
+ * @param pMem The incomplete memory object to delete.
+ */
+DECLHIDDEN(void) rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
+{
+    /* NULL is silently accepted, mirroring free() semantics. */
+    if (!pMem)
+        return;
+
+    /* Invalidate the magic and type so stale references trip assertions. */
+    ASMAtomicUoWriteU32(&pMem->u32Magic, ~RTR0MEMOBJ_MAGIC);
+    pMem->enmType = RTR0MEMOBJTYPE_END;
+    RTMemFree(pMem);
+}
+
+
+/**
+ * Links a mapping object to a primary object.
+ *
+ * @returns IPRT status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
+ * @param pParent The parent (primary) memory object.
+ * @param pChild The child (mapping) memory object.
+ */
+static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
+{
+    uint32_t i;
+
+    /* sanity */
+    Assert(rtR0MemObjIsMapping(pChild));
+    Assert(!rtR0MemObjIsMapping(pParent));
+
+    /* expand the array? (grown in batches of 32 entries) */
+    i = pParent->uRel.Parent.cMappings;
+    if (i >= pParent->uRel.Parent.cMappingsAllocated)
+    {
+        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
+                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
+        if (!pv)
+            return VERR_NO_MEMORY;
+        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
+        pParent->uRel.Parent.cMappingsAllocated = i + 32;
+        Assert(i == pParent->uRel.Parent.cMappings);
+    }
+
+    /* do the linking. */
+    pParent->uRel.Parent.papMappings[i] = pChild;
+    pParent->uRel.Parent.cMappings++;
+    pChild->uRel.Child.pParent = pParent;
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if this is mapping or not.
+ *
+ * @returns true if it's a mapping, otherwise false.
+ * @param MemObj The ring-0 memory object handle.
+ */
+RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
+{
+    PRTR0MEMOBJINTERNAL pMem;
+
+    /* Validate the handle before trusting any of its fields. */
+    AssertPtrReturn(MemObj, false);
+    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
+    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
+    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
+
+    /* The inlined worker holds the actual logic. */
+    return rtR0MemObjIsMapping(pMem);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjIsMapping);
+
+
+/**
+ * Gets the address of a ring-0 memory object.
+ *
+ * @returns The address of the memory object.
+ * @returns NULL if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
+ * @param MemObj The ring-0 memory object handle.
+ */
+RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
+{
+    PRTR0MEMOBJINTERNAL pMem;
+
+    /* A nil handle quietly yields no address; anything else is validated. */
+    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
+        return NULL;
+    AssertPtrReturn(MemObj, NULL);
+    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
+    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
+    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);
+
+    /* Hand back the mapping address recorded at creation time. */
+    return pMem->pv;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAddress);
+
+
+/**
+ * Gets the ring-3 address of a ring-0 memory object.
+ *
+ * This only applies to ring-0 memory object with ring-3 mappings of some kind, i.e.
+ * locked user memory, reserved user address space and user mappings. This API should
+ * not be used on any other objects.
+ *
+ * @returns The address of the memory object.
+ * @returns NIL_RTR3PTR if the handle is invalid or if it's not an object with a ring-3 mapping.
+ * Strict builds will assert in both cases.
+ * @param MemObj The ring-0 memory object handle.
+ */
+RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
+{
+    PRTR0MEMOBJINTERNAL pMem;
+
+    /* Validate the object handle. */
+    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
+        return NIL_RTR3PTR;
+    AssertPtrReturn(MemObj, NIL_RTR3PTR);
+    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
+    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
+    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
+    /* Only types that carry a ring-3 mapping (R0Process set) qualify:
+       user mapping, user lock, non-contiguous phys lock, or reserved
+       user address space.  Everything else returns nil. */
+    if (RT_UNLIKELY(   (   pMem->enmType != RTR0MEMOBJTYPE_MAPPING
+                        || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
+                    && (   pMem->enmType != RTR0MEMOBJTYPE_LOCK
+                        || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
+                    && (   pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
+                        || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
+                    && (   pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
+                        || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
+        return NIL_RTR3PTR;
+
+    /* return the mapping address. */
+    return (RTR3PTR)pMem->pv;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAddressR3);
+
+
+/**
+ * Gets the size of a ring-0 memory object.
+ *
+ * The returned value may differ from the one specified to the API creating the
+ * object because of alignment adjustments. The minimal alignment currently
+ * employed by any API is PAGE_SIZE, so the result can safely be shifted by
+ * PAGE_SHIFT to calculate a page count.
+ *
+ * @returns The object size.
+ * @returns 0 if the handle is invalid (asserts in strict builds) or if there isn't any mapping.
+ * @param MemObj The ring-0 memory object handle.
+ */
+RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
+{
+    PRTR0MEMOBJINTERNAL pMem;
+
+    /* A nil handle has size zero; everything else must validate. */
+    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
+        return 0;
+    AssertPtrReturn(MemObj, 0);
+    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
+    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
+    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
+    AssertMsg(RT_ALIGN_Z(pMem->cb, PAGE_SIZE) == pMem->cb, ("%#zx\n", pMem->cb));
+
+    /* The stored size is always page aligned (asserted above). */
+    return pMem->cb;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjSize);
+
+
+/**
+ * Get the physical address of an page in the memory object.
+ *
+ * @returns The physical address.
+ * @returns NIL_RTHCPHYS if the object doesn't contain fixed physical pages.
+ * @returns NIL_RTHCPHYS if the iPage is out of range.
+ * @returns NIL_RTHCPHYS if the object handle isn't valid.
+ * @param MemObj The ring-0 memory object handle.
+ * @param iPage The page number within the object.
+ */
+/* Work around gcc bug 55940 */
+#if defined(__GNUC__) && defined(RT_ARCH_X86) && (__GNUC__ * 100 + __GNUC_MINOR__) == 407
+ __attribute__((__optimize__ ("no-shrink-wrap")))
+#endif
+RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
+{
+    /* Validate the object handle.  (The original had each magic/type check
+       twice - plain AssertReturn immediately followed by an equivalent
+       AssertMsgReturn; keep only the message-carrying variants.) */
+    PRTR0MEMOBJINTERNAL pMem;
+    size_t cPages;
+    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
+    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
+    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
+    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
+    cPages = (pMem->cb >> PAGE_SHIFT);
+    if (iPage >= cPages)
+    {
+        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
+        if (iPage == cPages)
+            return NIL_RTHCPHYS;
+        /* Reuse cPages instead of recomputing pMem->cb >> PAGE_SHIFT. */
+        AssertReturn(iPage < cPages, NIL_RTHCPHYS);
+    }
+
+    /*
+     * We know the address of physically contiguous allocations and mappings.
+     */
+    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
+        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
+    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
+        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;
+
+    /*
+     * Do the job.
+     */
+    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjGetPagePhysAddr);
+
+
+/**
+ * Frees a ring-0 memory object.
+ *
+ * @returns IPRT status code.
+ * @retval VERR_INVALID_HANDLE if the handle is not valid.
+ * @param MemObj The ring-0 memory object to be freed. NULL is accepted.
+ * @param fFreeMappings Whether or not to free mappings of the object.
+ */
+RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
+{
+    /*
+     * Validate the object handle.
+     */
+    PRTR0MEMOBJINTERNAL pMem;
+    int rc;
+
+    if (MemObj == NIL_RTR0MEMOBJ)
+        return VINF_SUCCESS;
+    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
+    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
+    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
+    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPTIBLE();
+
+    /*
+     * Deal with mappings according to fFreeMappings.
+     */
+    if (   !rtR0MemObjIsMapping(pMem)
+        && pMem->uRel.Parent.cMappings > 0)
+    {
+        /* fail if not requested to free mappings. */
+        if (!fFreeMappings)
+            return VERR_MEMORY_BUSY;
+
+        /* Free the children last-to-first, popping each off the array before
+           the native free so a failure can push it back and bail out. */
+        while (pMem->uRel.Parent.cMappings > 0)
+        {
+            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
+            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;
+
+            /* sanity checks. */
+            AssertPtr(pChild);
+            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
+            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
+            AssertFatal(rtR0MemObjIsMapping(pChild));
+
+            /* free the mapping. */
+            rc = rtR0MemObjNativeFree(pChild);
+            if (RT_FAILURE(rc))
+            {
+                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
+                /* Re-register the child so the object stays consistent. */
+                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
+                return rc;
+            }
+        }
+    }
+
+    /*
+     * Free this object.
+     */
+    rc = rtR0MemObjNativeFree(pMem);
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
+         */
+        if (rtR0MemObjIsMapping(pMem))
+        {
+            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
+            uint32_t i;
+
+            /* sanity checks */
+            AssertPtr(pParent);
+            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
+            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
+            AssertFatal(!rtR0MemObjIsMapping(pParent));
+            AssertFatal(pParent->uRel.Parent.cMappings > 0);
+            AssertPtr(pParent->uRel.Parent.papMappings);
+
+            /* locate and remove from the array of mappings (swap-with-last,
+               order of siblings is not preserved). */
+            i = pParent->uRel.Parent.cMappings;
+            while (i-- > 0)
+            {
+                if (pParent->uRel.Parent.papMappings[i] == pMem)
+                {
+                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
+                    break;
+                }
+            }
+            Assert(i != UINT32_MAX);
+        }
+        else
+            Assert(pMem->uRel.Parent.cMappings == 0);
+
+        /*
+         * Finally, destroy the handle.
+         */
+        pMem->u32Magic++;
+        pMem->enmType = RTR0MEMOBJTYPE_END;
+        if (!rtR0MemObjIsMapping(pMem))
+            RTMemFree(pMem->uRel.Parent.papMappings);
+        RTMemFree(pMem);
+    }
+    else
+        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
+             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjFree);
+
+
+
+RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
+{
+    /* Round the request up to whole pages and sanity check the inputs;
+       cb > cbAligned would indicate size_t overflow during rounding. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+    RT_NOREF_PV(pszTag); /* tag is unused in this build */
+
+    /* Hand the page-aligned request to the platform specific worker. */
+    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAllocPageTag);
+
+
+RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
+{
+    /* Round the request up to whole pages and sanity check the inputs;
+       cb > cbAligned would indicate size_t overflow during rounding. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+    RT_NOREF_PV(pszTag); /* tag is unused in this build */
+
+    /* Hand the page-aligned request to the platform specific worker. */
+    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAllocLowTag);
+
+
+RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
+{
+    /* Round the request up to whole pages and sanity check the inputs;
+       cb > cbAligned would indicate size_t overflow during rounding. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+    RT_NOREF_PV(pszTag); /* tag is unused in this build */
+
+    /* Hand the page-aligned request to the platform specific worker. */
+    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, fExecutable);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAllocContTag);
+
+
+RTR0DECL(int) RTR0MemObjLockUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
+                                    uint32_t fAccess, RTR0PROCESS R0Process, const char *pszTag)
+{
+    /* sanity checks.  The range is widened to cover whole pages: the start
+       is rounded down to a page boundary and the byte count is grown by the
+       in-page offset before rounding up. */
+    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
+    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
+    /* NIL means the calling process. */
+    if (R0Process == NIL_RTR0PROCESS)
+        R0Process = RTR0ProcHandleSelf();
+    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
+    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the locking. */
+    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjLockUserTag);
+
+
+RTR0DECL(int) RTR0MemObjLockKernelTag(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
+{
+    /* sanity checks.  Like the user variant, the range is widened to whole
+       pages: round the address down and grow the size by the page offset. */
+    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
+    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
+    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
+    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
+    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the allocation. */
+    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjLockKernelTag);
+
+
+/**
+ * Allocates page-aligned, physically contiguous memory below a given address.
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new memory object handle.
+ * @param   cb          Number of bytes to allocate (rounded up to pages).
+ * @param   PhysHighest Highest acceptable physical address (inclusive limit
+ *                      passed to the native backend; must be >= cb).
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjAllocPhysTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
+{
+    /* sanity checks. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size_t wrap-around in the round-up above */
+    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the allocation. */
+    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, PAGE_SIZE /* page aligned */);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysTag);
+
+
+/**
+ * Allocates physically contiguous memory with a caller-specified alignment.
+ *
+ * Extended variant of RTR0MemObjAllocPhysTag() that lets the caller request
+ * PAGE_SIZE, 2MB, 4MB or 1GB physical alignment.
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new memory object handle.
+ * @param   cb          Number of bytes to allocate (rounded up to pages).
+ * @param   PhysHighest Highest acceptable physical address (must be >= cb).
+ * @param   uAlignment  Required physical alignment; 0 is taken as PAGE_SIZE.
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjAllocPhysExTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment, const char *pszTag)
+{
+    /* sanity checks. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size_t wrap-around in the round-up above */
+    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
+    if (uAlignment == 0)
+        uAlignment = PAGE_SIZE;
+    AssertReturn(   uAlignment == PAGE_SIZE
+                 || uAlignment == _2M
+                 || uAlignment == _4M
+                 || uAlignment == _1G,
+                 VERR_INVALID_PARAMETER);
+#if HC_ARCH_BITS == 32
+    /* Memory allocated in this way is typically mapped into kernel space as well; simply
+       don't allow this on 32 bits hosts as the kernel space is too crowded already. */
+    if (uAlignment != PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+#endif
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the allocation. */
+    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, uAlignment);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysExTag);
+
+
+/**
+ * Allocates physical memory that does NOT need to be contiguous (NC).
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new memory object handle.
+ * @param   cb          Number of bytes to allocate (rounded up to pages).
+ * @param   PhysHighest Highest acceptable physical address (must be >= cb).
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjAllocPhysNCTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
+{
+    /* sanity checks. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size_t wrap-around in the round-up above */
+    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the allocation. */
+    return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysNCTag);
+
+
+/**
+ * Creates a memory object around an existing physical address range
+ * (e.g. device MMIO) without allocating anything.
+ *
+ * Phys/cb need not be page aligned; the range is expanded to page boundaries.
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj      Where to store the new memory object handle.
+ * @param   Phys         Start physical address; must not be NIL_RTHCPHYS.
+ * @param   cb           Number of bytes, must be non-zero.
+ * @param   uCachePolicy RTMEM_CACHE_POLICY_DONT_CARE or RTMEM_CACHE_POLICY_MMIO.
+ * @param   pszTag       Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjEnterPhysTag(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy, const char *pszTag)
+{
+    /* sanity checks. */
+    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
+    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size_t wrap-around in the round-up above */
+    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
+    AssertReturn(   uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE
+                 || uCachePolicy == RTMEM_CACHE_POLICY_MMIO,
+                 VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the allocation. */
+    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned, uCachePolicy);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjEnterPhysTag);
+
+
+/**
+ * Reserves a range of kernel virtual address space (no backing memory).
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new memory object handle.
+ * @param   pvFixed     Requested fixed address, or (void *)-1 for any address.
+ * @param   cb          Number of bytes to reserve (rounded up to pages).
+ * @param   uAlignment  PAGE_SIZE, _2M or _4M; 0 is taken as PAGE_SIZE.
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjReserveKernelTag(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, const char *pszTag)
+{
+    /* sanity checks. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    if (uAlignment == 0)
+        uAlignment = PAGE_SIZE;
+    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size_t wrap-around in the round-up above */
+    if (pvFixed != (void *)-1)
+        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER); /* fixed address must honour the alignment */
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the reservation. */
+    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjReserveKernelTag);
+
+
+/**
+ * Reserves a range of user virtual address space in the given process.
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new memory object handle.
+ * @param   R3PtrFixed  Requested fixed address, or (RTR3PTR)-1 for any address.
+ * @param   cb          Number of bytes to reserve (rounded up to pages).
+ * @param   uAlignment  PAGE_SIZE, _2M or _4M; 0 is taken as PAGE_SIZE.
+ * @param   R0Process   Target process; NIL_RTR0PROCESS means the caller's own.
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjReserveUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb,
+                                       size_t uAlignment, RTR0PROCESS R0Process, const char *pszTag)
+{
+    /* sanity checks. */
+    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    if (uAlignment == 0)
+        uAlignment = PAGE_SIZE;
+    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
+    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
+    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER); /* catches size_t wrap-around in the round-up above */
+    if (R3PtrFixed != (RTR3PTR)-1)
+        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER); /* fixed address must honour the alignment */
+    if (R0Process == NIL_RTR0PROCESS)
+        R0Process = RTR0ProcHandleSelf();
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the reservation. */
+    return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjReserveUserTag);
+
+
+/**
+ * Maps a memory object into kernel space (whole-object convenience wrapper).
+ *
+ * Forwards to RTR0MemObjMapKernelExTag() with offSub = 0 and cbSub = 0,
+ * i.e. the entire object is mapped.
+ */
+RTR0DECL(int) RTR0MemObjMapKernelTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed,
+                                     size_t uAlignment, unsigned fProt, const char *pszTag)
+{
+    return RTR0MemObjMapKernelExTag(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0, pszTag);
+}
+RT_EXPORT_SYMBOL(RTR0MemObjMapKernelTag);
+
+
+/**
+ * Maps (part of) a memory object into kernel space.
+ *
+ * The source object must be a primary object (not itself a mapping) and not a
+ * virtual-space reservation.  offSub/cbSub select a page-aligned sub-range;
+ * passing 0/0 maps the whole object.
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new mapping object handle.
+ * @param   MemObjToMap The object to map.
+ * @param   pvFixed     Requested fixed address, or (void *)-1 for any address.
+ * @param   uAlignment  PAGE_SIZE, _2M or _4M; 0 is taken as PAGE_SIZE.
+ * @param   fProt       RTMEM_PROT_READ/WRITE/EXEC combination, not NONE.
+ * @param   offSub      Page-aligned offset into the object.
+ * @param   cbSub       Page-aligned size of the sub-range, 0 for "to the end".
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjMapKernelExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
+                                       unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
+{
+    PRTR0MEMOBJINTERNAL pMemToMap;
+    PRTR0MEMOBJINTERNAL pNew;
+    int rc;
+
+    /* sanity checks. */
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
+    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
+    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
+    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
+    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER); /* no mappings of mappings */
+    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER); /* reservations have nothing to map */
+    if (uAlignment == 0)
+        uAlignment = PAGE_SIZE;
+    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
+    if (pvFixed != (void *)-1)
+        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
+    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
+    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
+    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
+    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
+    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* adjust the request to simplify the native code. */
+    if (offSub == 0 && cbSub == pMemToMap->cb)
+        cbSub = 0; /* whole object: normalize to the 0/0 form expected by the backends */
+
+    /* do the mapping. */
+    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub);
+    if (RT_SUCCESS(rc))
+    {
+        /* link it. */
+        rc = rtR0MemObjLink(pMemToMap, pNew);
+        if (RT_SUCCESS(rc))
+            *pMemObj = pNew;
+        else
+        {
+            /* damn, out of memory. bail out. */
+            int rc2 = rtR0MemObjNativeFree(pNew);
+            AssertRC(rc2);
+            pNew->u32Magic++;                 /* invalidate the magic so stale handles are caught */
+            pNew->enmType = RTR0MEMOBJTYPE_END;
+            RTMemFree(pNew);
+        }
+    }
+
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjMapKernelExTag);
+
+
+/**
+ * Maps a memory object into the address space of a user process.
+ *
+ * The source object must be a primary object (not itself a mapping) and not a
+ * virtual-space reservation.
+ *
+ * @returns IPRT status code.
+ * @param   pMemObj     Where to store the new mapping object handle.
+ * @param   MemObjToMap The object to map.
+ * @param   R3PtrFixed  Requested fixed address, or (RTR3PTR)-1 for any address.
+ * @param   uAlignment  PAGE_SIZE, _2M or _4M; 0 is taken as PAGE_SIZE.
+ * @param   fProt       RTMEM_PROT_READ/WRITE/EXEC combination, not NONE.
+ * @param   R0Process   Target process; NIL_RTR0PROCESS means the caller's own.
+ * @param   pszTag      Allocation tag (unused in this generic layer).
+ */
+RTR0DECL(int) RTR0MemObjMapUserTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed,
+                                   size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, const char *pszTag)
+{
+    /* sanity checks. */
+    PRTR0MEMOBJINTERNAL pMemToMap;
+    PRTR0MEMOBJINTERNAL pNew;
+    int rc;
+    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
+    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
+    *pMemObj = NIL_RTR0MEMOBJ;
+    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
+    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
+    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
+    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER); /* no mappings of mappings */
+    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER); /* reservations have nothing to map */
+    if (uAlignment == 0)
+        uAlignment = PAGE_SIZE;
+    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
+    if (R3PtrFixed != (RTR3PTR)-1)
+        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
+    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
+    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
+    if (R0Process == NIL_RTR0PROCESS)
+        R0Process = RTR0ProcHandleSelf();
+    RT_ASSERT_PREEMPTIBLE();
+
+    RT_NOREF_PV(pszTag);
+
+    /* do the mapping. */
+    rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process);
+    if (RT_SUCCESS(rc))
+    {
+        /* link it. */
+        rc = rtR0MemObjLink(pMemToMap, pNew);
+        if (RT_SUCCESS(rc))
+            *pMemObj = pNew;
+        else
+        {
+            /* damn, out of memory. bail out. */
+            int rc2 = rtR0MemObjNativeFree(pNew);
+            AssertRC(rc2);
+            pNew->u32Magic++;                 /* invalidate the magic so stale handles are caught */
+            pNew->enmType = RTR0MEMOBJTYPE_END;
+            RTMemFree(pNew);
+        }
+    }
+
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjMapUserTag);
+
+
+/**
+ * Changes the page-level protection of a sub-range of a memory object.
+ *
+ * @returns IPRT status code.
+ * @param   hMemObj     Memory object handle; must be of a protectable type
+ *                      (checked via rtR0MemObjIsProtectable()).
+ * @param   offSub      Page-aligned offset into the object.
+ * @param   cbSub       Page-aligned size of the sub-range.
+ * @param   fProt       New protection: RTMEM_PROT_NONE/READ/WRITE/EXEC mask.
+ */
+RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+    PRTR0MEMOBJINTERNAL pMemObj;
+    int rc;
+
+    /* sanity checks. */
+    pMemObj = (PRTR0MEMOBJINTERNAL)hMemObj;
+    AssertPtrReturn(pMemObj, VERR_INVALID_HANDLE);
+    AssertReturn(pMemObj->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
+    AssertReturn(pMemObj->enmType > RTR0MEMOBJTYPE_INVALID && pMemObj->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
+    AssertReturn(rtR0MemObjIsProtectable(pMemObj), VERR_INVALID_PARAMETER);
+    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+    AssertReturn(offSub < pMemObj->cb, VERR_INVALID_PARAMETER);
+    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
+    AssertReturn(cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
+    AssertReturn(offSub + cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
+    AssertReturn(!(fProt & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    /* do the job */
+    rc = rtR0MemObjNativeProtect(pMemObj, offSub, cbSub, fProt);
+    if (RT_SUCCESS(rc))
+        pMemObj->fFlags |= RTR0MEMOBJ_FLAGS_PROT_CHANGED; /* record it */
+
+    return rc;
+}
+RT_EXPORT_SYMBOL(RTR0MemObjProtect);
+
diff --git a/src/VBox/Runtime/r0drv/mp-r0drv.h b/src/VBox/Runtime/r0drv/mp-r0drv.h
new file mode 100644
index 00000000..c11e8ded
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/mp-r0drv.h
@@ -0,0 +1,85 @@
+/* $Id: mp-r0drv.h $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Internal Header.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_mp_r0drv_h
+#define IPRT_INCLUDED_SRC_r0drv_mp_r0drv_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/mp.h>
+
+RT_C_DECLS_BEGIN
+
+/**
+ * MP callback
+ *
+ * @param idCpu CPU id
+ * @param pvUser1 The first user argument.
+ * @param pvUser2 The second user argument.
+ */
+typedef DECLCALLBACK(void) FNMPWORKER(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+/** Pointer to a FNMPWORKER(). */
+typedef FNMPWORKER *PFNMPWORKER;
+
+/**
+ * RTMpOn* argument packet used by the host specific callback
+ * wrapper functions.
+ */
+typedef struct RTMPARGS
+{
+ PFNMPWORKER pfnWorker;
+ void *pvUser1;
+ void *pvUser2;
+ RTCPUID idCpu;
+ RTCPUID idCpu2;
+ uint32_t volatile cHits;
+#ifdef RT_OS_WINDOWS
+ /** Turns out that KeFlushQueuedDpcs doesn't necessarily wait till all
+ * callbacks are done. So, do reference counting to make sure we don't free
+ * this structure befor all CPUs have completely handled their requests. */
+ int32_t volatile cRefs;
+#endif
+#ifdef RT_OS_LINUX
+ PRTCPUSET pWorkerSet;
+#endif
+} RTMPARGS;
+/** Pointer to a RTMpOn* argument packet. */
+typedef RTMPARGS *PRTMPARGS;
+
+/* Called from initterm-r0drv.cpp: */
+DECLHIDDEN(int) rtR0MpNotificationInit(void);
+DECLHIDDEN(void) rtR0MpNotificationTerm(void);
+
+/* The following is only relevant when using mpnotifcation-r0drv.cpp: */
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void);
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void);
+DECLHIDDEN(void) rtMpNotificationDoCallbacks(RTMPEVENT enmEvent, RTCPUID idCpu);
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_mp_r0drv_h */
+
diff --git a/src/VBox/Runtime/r0drv/mpnotification-r0drv.c b/src/VBox/Runtime/r0drv/mpnotification-r0drv.c
new file mode 100644
index 00000000..ee90dca4
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/mpnotification-r0drv.c
@@ -0,0 +1,322 @@
+/* $Id: mpnotification-r0drv.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Event Notifications.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/mp.h>
+#include "internal/iprt.h"
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/spinlock.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include "r0drv/mp-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Notification registration record tracking
+ * RTMpRegisterNotification() calls.
+ */
+typedef struct RTMPNOTIFYREG
+{
+ /** Pointer to the next record. */
+ struct RTMPNOTIFYREG * volatile pNext;
+ /** The callback. */
+ PFNRTMPNOTIFICATION pfnCallback;
+ /** The user argument. */
+ void *pvUser;
+ /** Bit mask indicating whether we've done this callback or not. */
+ uint8_t bmDone[sizeof(void *)];
+} RTMPNOTIFYREG;
+/** Pointer to a registration record. */
+typedef RTMPNOTIFYREG *PRTMPNOTIFYREG;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** The spinlock protecting the list. */
+static RTSPINLOCK volatile g_hRTMpNotifySpinLock = NIL_RTSPINLOCK;
+/** List of callbacks, in registration order. */
+static PRTMPNOTIFYREG volatile g_pRTMpCallbackHead = NULL;
+/** The current done bit. */
+static uint32_t volatile g_iRTMpDoneBit;
+/** The list generation.
+ * This is increased whenever the list has been modified. The callback routine
+ * make use of this to avoid having restart at the list head after each callback. */
+static uint32_t volatile g_iRTMpGeneration;
+
+
+
+
+/**
+ * This is called by the native code.
+ *
+ * @param idCpu The CPU id the event applies to.
+ * @param enmEvent The event.
+ */
+/**
+ * This is called by the native code to run all registered MP notification
+ * callbacks for an event.
+ *
+ * @param   idCpu       The CPU id the event applies to.
+ * @param   enmEvent    The event.
+ */
+DECLHIDDEN(void) rtMpNotificationDoCallbacks(RTMPEVENT enmEvent, RTCPUID idCpu)
+{
+    PRTMPNOTIFYREG  pCur;
+    RTSPINLOCK      hSpinlock;
+
+    /*
+     * This is a little bit tricky as we cannot be holding the spinlock
+     * while calling the callback.  This means that the list might change
+     * while we're walking it, and that multiple events might be running
+     * concurrently (depending on the OS).
+     *
+     * So, the first measure is to employ a small bitmask in each record
+     * with a bit position that rotates on each call to this function,
+     * marking which records have already been processed.  This copes with
+     * both list changes and a reasonable amount of concurrent events.
+     *
+     * In order to avoid having to restart the list walk after every
+     * callback we make, we use a list generation number that is incremented
+     * every time the list is changed.  If it is unchanged across a callback
+     * we can safely continue the iteration from where we left off.
+     */
+    uint32_t iDone = ASMAtomicIncU32(&g_iRTMpDoneBit);
+    iDone %= RT_SIZEOFMEMB(RTMPNOTIFYREG, bmDone) * 8;
+
+    /* NIL spinlock means we're initializing or terminating - nothing to do. */
+    hSpinlock = g_hRTMpNotifySpinLock;
+    if (hSpinlock == NIL_RTSPINLOCK)
+        return;
+    RTSpinlockAcquire(hSpinlock);
+
+    /* Clear the done bit for this pass in all current records. */
+    for (pCur = g_pRTMpCallbackHead; pCur; pCur = pCur->pNext)
+        ASMAtomicBitClear(&pCur->bmDone[0], iDone);
+
+    /* Iterate the records and perform the callbacks. */
+    do
+    {
+        uint32_t const iGeneration = ASMAtomicUoReadU32(&g_iRTMpGeneration);
+
+        pCur = g_pRTMpCallbackHead;
+        while (pCur)
+        {
+            if (!ASMAtomicBitTestAndSet(&pCur->bmDone[0], iDone))
+            {
+                /* Snapshot what we need, advance, and drop the lock before calling out. */
+                PFNRTMPNOTIFICATION pfnCallback = pCur->pfnCallback;
+                void *pvUser = pCur->pvUser;
+                pCur = pCur->pNext;
+                RTSpinlockRelease(g_hRTMpNotifySpinLock);
+
+                pfnCallback(enmEvent, idCpu, pvUser);
+
+                /* carefully reacquire the lock here, see RTR0MpNotificationTerm(). */
+                hSpinlock = g_hRTMpNotifySpinLock;
+                if (hSpinlock == NIL_RTSPINLOCK)
+                    return;
+                RTSpinlockAcquire(hSpinlock);
+                if (ASMAtomicUoReadU32(&g_iRTMpGeneration) != iGeneration)
+                    break; /* list changed while unlocked - restart the walk */
+            }
+            else
+                pCur = pCur->pNext;
+        }
+    } while (pCur);
+
+    RTSpinlockRelease(hSpinlock);
+}
+
+
+
+/**
+ * Registers an MP event notification callback.
+ *
+ * A (pfnCallback, pvUser) pair may only be registered once; a second attempt
+ * fails with VERR_ALREADY_EXISTS.  The duplicate check is performed twice:
+ * once optimistically before allocating, and again while holding the lock
+ * during insertion (another thread may have registered in between).
+ *
+ * @returns VINF_SUCCESS, VERR_INVALID_POINTER, VERR_WRONG_ORDER,
+ *          VERR_NO_MEMORY or VERR_ALREADY_EXISTS.
+ * @param   pfnCallback     The callback to register.
+ * @param   pvUser          User argument handed to the callback.
+ */
+RTDECL(int) RTMpNotificationRegister(PFNRTMPNOTIFICATION pfnCallback, void *pvUser)
+{
+    PRTMPNOTIFYREG pCur;
+    PRTMPNOTIFYREG pNew;
+
+    /*
+     * Validation.
+     */
+    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+    AssertReturn(g_hRTMpNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    /* Cheap early duplicate check (racy, repeated under the lock below). */
+    RTSpinlockAcquire(g_hRTMpNotifySpinLock);
+    for (pCur = g_pRTMpCallbackHead; pCur; pCur = pCur->pNext)
+        if (   pCur->pvUser == pvUser
+            && pCur->pfnCallback == pfnCallback)
+            break;
+    RTSpinlockRelease(g_hRTMpNotifySpinLock);
+    AssertMsgReturn(!pCur, ("pCur=%p pfnCallback=%p pvUser=%p\n", pCur, pfnCallback, pvUser), VERR_ALREADY_EXISTS);
+
+    /*
+     * Allocate a new record and attempt to insert it.
+     */
+    pNew = (PRTMPNOTIFYREG)RTMemAlloc(sizeof(*pNew));
+    if (!pNew)
+        return VERR_NO_MEMORY;
+
+    pNew->pNext       = NULL;
+    pNew->pfnCallback = pfnCallback;
+    pNew->pvUser      = pvUser;
+    /* All done-bits set so a concurrent rtMpNotificationDoCallbacks() pass
+       won't call us for an event that started before we registered. */
+    memset(&pNew->bmDone[0], 0xff, sizeof(pNew->bmDone));
+
+    RTSpinlockAcquire(g_hRTMpNotifySpinLock);
+
+    /* Append to the tail, re-checking for duplicates; pCur is left non-NULL
+       (pointing at the existing record) iff a duplicate was found. */
+    pCur = g_pRTMpCallbackHead;
+    if (!pCur)
+        g_pRTMpCallbackHead = pNew;
+    else
+    {
+        for (pCur = g_pRTMpCallbackHead; ; pCur = pCur->pNext)
+            if (   pCur->pvUser == pvUser
+                && pCur->pfnCallback == pfnCallback)
+                break;
+            else if (!pCur->pNext)
+            {
+                pCur->pNext = pNew;
+                pCur = NULL;
+                break;
+            }
+    }
+
+    ASMAtomicIncU32(&g_iRTMpGeneration);
+
+    RTSpinlockRelease(g_hRTMpNotifySpinLock);
+
+    /* duplicate? */
+    if (pCur)
+    {
+        /* Free the unused new record (NOT pCur - that one is still linked
+           in the list and owned by the earlier registration). */
+        RTMemFree(pNew);
+        AssertMsgFailedReturn(("pCur=%p pfnCallback=%p pvUser=%p\n", pCur, pfnCallback, pvUser), VERR_ALREADY_EXISTS);
+    }
+
+    return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTMpNotificationRegister);
+
+
+/**
+ * Deregisters an MP event notification callback.
+ *
+ * @returns VINF_SUCCESS, VERR_INVALID_POINTER, VERR_WRONG_ORDER or
+ *          VERR_NOT_FOUND if the (pfnCallback, pvUser) pair isn't registered.
+ * @param   pfnCallback     The callback to remove.
+ * @param   pvUser          The user argument it was registered with.
+ */
+RTDECL(int) RTMpNotificationDeregister(PFNRTMPNOTIFICATION pfnCallback, void *pvUser)
+{
+    PRTMPNOTIFYREG pPrev;
+    PRTMPNOTIFYREG pCur;
+
+    /*
+     * Validation.
+     */
+    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+    AssertReturn(g_hRTMpNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
+    RT_ASSERT_INTS_ON();
+
+    /*
+     * Find and unlink the record from the list.
+     */
+    RTSpinlockAcquire(g_hRTMpNotifySpinLock);
+    pPrev = NULL;
+    for (pCur = g_pRTMpCallbackHead; pCur; pCur = pCur->pNext)
+    {
+        if (   pCur->pvUser == pvUser
+            && pCur->pfnCallback == pfnCallback)
+            break;
+        pPrev = pCur;
+    }
+    if (pCur)
+    {
+        if (pPrev)
+            pPrev->pNext = pCur->pNext;
+        else
+            g_pRTMpCallbackHead = pCur->pNext;
+        ASMAtomicIncU32(&g_iRTMpGeneration); /* tell concurrent DoCallbacks walks the list changed */
+    }
+    RTSpinlockRelease(g_hRTMpNotifySpinLock);
+
+    if (!pCur)
+        return VERR_NOT_FOUND;
+
+    /*
+     * Invalidate and free the record.
+     */
+    pCur->pNext = NULL;
+    pCur->pfnCallback = NULL;
+    RTMemFree(pCur);
+
+    return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTMpNotificationDeregister);
+
+
+/**
+ * Initializes the MP notification subsystem: creates the list spinlock and
+ * then initializes the native backend, undoing the spinlock on failure.
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0MpNotificationInit(void)
+{
+    int rc = RTSpinlockCreate((PRTSPINLOCK)&g_hRTMpNotifySpinLock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0Mp");
+    if (RT_SUCCESS(rc))
+    {
+        rc = rtR0MpNotificationNativeInit();
+        if (RT_SUCCESS(rc))
+            return rc;
+
+        /* native init failed - tear down the spinlock again. */
+        RTSpinlockDestroy(g_hRTMpNotifySpinLock);
+        g_hRTMpNotifySpinLock = NIL_RTSPINLOCK;
+    }
+    return rc;
+}
+
+
+/**
+ * Terminates the MP notification subsystem.
+ *
+ * Sets the global spinlock handle to NIL while holding the lock, so that
+ * rtMpNotificationDoCallbacks() (which re-reads the handle after each
+ * callback) bails out instead of touching the freed list.
+ */
+DECLHIDDEN(void) rtR0MpNotificationTerm(void)
+{
+    PRTMPNOTIFYREG  pHead;
+    RTSPINLOCK      hSpinlock = g_hRTMpNotifySpinLock;
+    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);
+
+    rtR0MpNotificationNativeTerm();
+
+    /* pick up the list and the spinlock. */
+    RTSpinlockAcquire(hSpinlock);
+    ASMAtomicWriteHandle(&g_hRTMpNotifySpinLock, NIL_RTSPINLOCK);
+    pHead = g_pRTMpCallbackHead;
+    g_pRTMpCallbackHead = NULL;
+    ASMAtomicIncU32(&g_iRTMpGeneration);
+    RTSpinlockRelease(hSpinlock);
+
+    /* free the list. */
+    while (pHead)
+    {
+        PRTMPNOTIFYREG pFree = pHead;
+        pHead = pHead->pNext;
+
+        pFree->pNext = NULL;
+        pFree->pfnCallback = NULL;
+        RTMemFree(pFree);
+    }
+
+    RTSpinlockDestroy(hSpinlock);
+}
+
diff --git a/src/VBox/Runtime/r0drv/netbsd/Makefile.kup b/src/VBox/Runtime/r0drv/netbsd/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/netbsd/RTLogWriteStdOut-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/RTLogWriteStdOut-r0drv-netbsd.c
new file mode 100644
index 00000000..df61da3a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/RTLogWriteStdOut-r0drv-netbsd.c
@@ -0,0 +1,39 @@
+/* $Id: RTLogWriteStdOut-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Log To StdOut, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+
+/**
+ * Writes a log buffer to "stdout" - on NetBSD ring-0 this is the kernel
+ * console via printf().  The %.*s form bounds the write to cb bytes, so the
+ * buffer need not be NUL terminated.
+ */
+RTDECL(void) RTLogWriteStdOut(const char *pch, size_t cb)
+{
+    printf("%.*s", (int)cb, pch);
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/alloc-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/alloc-r0drv-netbsd.c
new file mode 100644
index 00000000..8c0f7621
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/alloc-r0drv-netbsd.c
@@ -0,0 +1,165 @@
+/* $Id: alloc-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, NetBSD.
+ */
+/*
+ * Copyright (C) 2014-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ * ---------------------------------------------------------------------------
+ *
+ * This code is based on:
+ *
+ * Copyright (c) 2014 Arto Huusko
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/param.h>
+
+#include "r0drv/alloc-r0drv.h"
+
+
+/**
+ * Worker for the generic IPRT ring-0 allocator: allocates cb bytes plus a
+ * RTMEMHDR prefix from the NetBSD kmem(9) allocator.
+ *
+ * Uses KM_NOSLEEP so the call is safe in non-sleepable contexts and may
+ * return VERR_NO_MEMORY instead of blocking.
+ *
+ * NOTE(review): RTMEMHDR_FLAG_EXEC gets no special handling here - confirm
+ * kmem memory is acceptable for the exec-alloc path on this platform.
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param   cb      Number of user bytes requested.
+ * @param   fFlags  RTMEMHDR_FLAG_XXX; only RTMEMHDR_FLAG_ZEROED is acted on.
+ * @param   ppHdr   Where to return the header (user data follows it).
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+    size_t cbAllocated = cb;
+    PRTMEMHDR pHdr = NULL;
+
+    if (fFlags & RTMEMHDR_FLAG_ZEROED)
+        pHdr = kmem_zalloc(cb + sizeof(RTMEMHDR), KM_NOSLEEP);
+    else
+        pHdr = kmem_alloc(cb + sizeof(RTMEMHDR), KM_NOSLEEP);
+
+    if (RT_UNLIKELY(!pHdr))
+        return VERR_NO_MEMORY;
+
+    pHdr->u32Magic = RTMEMHDR_MAGIC;
+    pHdr->fFlags = fFlags;
+    pHdr->cb = cbAllocated;        /* size excluding the header; rtR0MemFree() adds it back */
+    pHdr->cbReq = cb;
+
+    *ppHdr = pHdr;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for the generic IPRT ring-0 allocator: frees a block allocated by
+ * rtR0MemAllocEx().  The magic is bumped first so stale pointers to the
+ * freed header no longer pass the RTMEMHDR_MAGIC check.
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+    pHdr->u32Magic += 1;
+
+    kmem_free(pHdr, pHdr->cb + sizeof(RTMEMHDR)); /* must match the size given to kmem_alloc/kmem_zalloc */
+}
+
+/**
+ * Frees memory allocated by RTMemContAlloc().
+ *
+ * cb must be the size passed to RTMemContAlloc() (it is rounded up to whole
+ * pages the same way).  NULL pv is a no-op.  Since the allocation is
+ * physically contiguous, the page list is reconstructed by walking the
+ * physical range page by page.
+ */
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+    if (pv)
+    {
+        cb = round_page(cb);
+
+        /* Look up the physical start address of the contiguous block. */
+        paddr_t pa;
+        pmap_extract(pmap_kernel(), (vaddr_t)pv, &pa);
+
+        /*
+         * Reconstruct pglist to free the physical pages
+         */
+        struct pglist rlist;
+        TAILQ_INIT(&rlist);
+
+        for (paddr_t pa2 = pa ; pa2 < pa + cb ; pa2 += PAGE_SIZE) {
+            struct vm_page *page = PHYS_TO_VM_PAGE(pa2);
+            TAILQ_INSERT_TAIL(&rlist, page, pageq.queue);
+        }
+
+        /* Unmap */
+        pmap_kremove((vaddr_t)pv, cb);
+
+        /* Free the virtual space */
+        uvm_km_free(kernel_map, (vaddr_t)pv, cb, UVM_KMF_VAONLY);
+
+        /* Free the physical pages */
+        uvm_pglistfree(&rlist);
+    }
+}
+
+/**
+ * Allocates physically contiguous, page-aligned memory and maps it into
+ * kernel space with read/write/execute access.
+ *
+ * The physical pages are constrained to the range 0..0xFFFFFFFF (32-bit
+ * addressable) by the uvm_pglistalloc() call.
+ *
+ * @returns Kernel virtual address of the block, NULL on failure.
+ * @param   pPhys   Where to return the physical address of the first page.
+ * @param   cb      Number of bytes to allocate (rounded up to whole pages).
+ */
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+    /*
+     * Validate input.
+     */
+    AssertPtr(pPhys);
+    Assert(cb > 0);
+
+    cb = round_page(cb);
+
+    /* Reserve virtual address space only; physical pages come separately. */
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+        return NULL;
+
+    struct pglist rlist;
+
+    /* Contiguous (last arg pair: nsegs=1, waitok=1), 32-bit addressable pages. */
+    if (uvm_pglistalloc(cb, 0, (paddr_t)0xFFFFFFFF,
+                        PAGE_SIZE, 0, &rlist, 1, 1) != 0)
+    {
+        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
+        return NULL;
+    }
+
+    /* Enter each page into the kernel pmap at consecutive virtual addresses. */
+    struct vm_page *page;
+    vaddr_t virt2 = virt;
+    TAILQ_FOREACH(page, &rlist, pageq.queue)
+    {
+        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page),
+                       VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 0);
+        virt2 += PAGE_SIZE;
+    }
+
+    page = TAILQ_FIRST(&rlist);
+    *pPhys = VM_PAGE_TO_PHYS(page);
+
+    return (void *)virt;
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/assert-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/assert-r0drv-netbsd.c
new file mode 100644
index 00000000..43872a23
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/assert-r0drv-netbsd.c
@@ -0,0 +1,63 @@
+/* $Id: assert-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/stdarg.h>
+
+#include "internal/assert.h"
+
+
+/**
+ * Prints the first part of an assertion message (expression and location)
+ * to the kernel console.
+ *
+ * @param   pszExpr     The expression that failed.
+ * @param   uLine       Source line number of the assertion.
+ * @param   pszFile     Source file name.
+ * @param   pszFunction Function containing the assertion.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+    /* uLine is unsigned, so use %u (was %d: format/argument mismatch). */
+    printf("\r\n!!Assertion Failed!!\r\n"
+           "Expression: %s\r\n"
+           "Location : %s(%u) %s\r\n",
+           pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+/**
+ * Prints the second, printf-style part of an assertion message.
+ * Currently a stub on NetBSD; the message is silently dropped.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+    /** @todo implement rtR0AssertNativeMsg2V. */
+}
+
+
+/**
+ * Panics the system with the buffered assertion text
+ * (g_szRTAssertMsg1/g_szRTAssertMsg2 are filled by the generic workers).
+ */
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+    panic("%s%s", g_szRTAssertMsg1, g_szRTAssertMsg2);
+
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/initterm-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/initterm-r0drv-netbsd.c
new file mode 100644
index 00000000..99a511c2
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/initterm-r0drv-netbsd.c
@@ -0,0 +1,52 @@
+/* $Id: initterm-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Initialization & Termination, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/errcore.h>
+
+#include "internal/initterm.h"
+
+
+/** Native (NetBSD) one-time ring-0 init; no platform state is needed. */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+    /* nothing to do */
+    return VINF_SUCCESS;
+}
+
+
+/** Native (NetBSD) ring-0 termination counterpart of rtR0InitNative. */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+    /* nothing to undo */
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c
new file mode 100644
index 00000000..4d4b5bba
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c
@@ -0,0 +1,558 @@
+/* $Id: memobj-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ * Copyright (c) 2011 Andriy Gapon <avg@FreeBSD.org>
+ * Copyright (c) 2014 Arto Huusko
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/memobj.h>
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The NetBSD version of the memory object structure.
+ */
+/**
+ * The NetBSD version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJNETBSD
+{
+    /** The core structure. */
+    RTR0MEMOBJINTERNAL Core;
+    /** NOTE(review): not referenced by the code in this file — candidate for
+     *  removal; confirm no other user before dropping it. */
+    size_t size;
+    /** The physical pages backing the object (filled by uvm_pglistalloc). */
+    struct pglist pglist;
+} RTR0MEMOBJNETBSD, *PRTR0MEMOBJNETBSD;
+
+
+/** Local shorthand for a pointer to a UVM map. */
+typedef struct vm_map* vm_map_t;
+
+/**
+ * Gets the virtual memory map the specified object is mapped into.
+ *
+ * @returns VM map handle on success, NULL if no map.
+ * @param   pMem    The memory object.
+ */
+static vm_map_t rtR0MemObjNetBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
+{
+    switch (pMem->enmType)
+    {
+        /* Kernel-side allocations always live in the kernel map. */
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+            return kernel_map;
+
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            return NULL; /* pretend these have no mapping atm. */
+
+        /* For the remaining types the map depends on whether the object
+           belongs to the kernel or to a specific user process. */
+        case RTR0MEMOBJTYPE_LOCK:
+            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
+                ? kernel_map
+                : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
+                ? kernel_map
+                : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;
+
+        case RTR0MEMOBJTYPE_MAPPING:
+            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
+                ? kernel_map
+                : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;
+
+        default:
+            return NULL;
+    }
+}
+
+
+/**
+ * Releases the native (NetBSD) resources backing a memory object.
+ *
+ * @returns VINF_SUCCESS, or VERR_INTERNAL_ERROR for an unknown object type.
+ * @param   pMem    The memory object to free.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;
+
+    switch (pMemNetBSD->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+        {
+            /* Allocated with kmem_alloc in rtR0MemObjNativeAllocPage. */
+            kmem_free(pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
+            break;
+        }
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+        {
+            /* Unmap */
+            pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
+            /* Free the virtual space */
+            uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
+            /* Free the physical pages */
+            uvm_pglistfree(&pMemNetBSD->pglist);
+            break;
+        }
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            /* Free the physical pages */
+            uvm_pglistfree(&pMemNetBSD->pglist);
+            break;
+        }
+        case RTR0MEMOBJTYPE_LOCK:
+            /* Only user-process memory was wired (rtR0MemObjNativeLockUser);
+               kernel locks are no-ops. */
+            if (pMemNetBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+            {
+                uvm_map_pageable(
+                    &((struct proc *)pMemNetBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map,
+                    (vaddr_t)pMemNetBSD->Core.pv,
+                    ((vaddr_t)pMemNetBSD->Core.pv) + pMemNetBSD->Core.cb,
+                    1, 0);
+            }
+            break;
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            /* Use the ResVirt union member for this type (was u.Lock, which
+               only worked by union aliasing). */
+            if (pMemNetBSD->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
+            {
+                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
+            }
+            break;
+        case RTR0MEMOBJTYPE_MAPPING:
+            /* Use the Mapping union member for this type (was u.Lock). */
+            if (pMemNetBSD->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
+            {
+                pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
+                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
+            }
+            break;
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", pMemNetBSD->Core.enmType));
+            return VERR_INTERNAL_ERROR;
+    }
+
+    return VINF_SUCCESS;
+}
+
+/**
+ * Worker for the low/cont allocators: reserves kernel VA, allocates physical
+ * pages below VmPhysAddrHigh and maps them read/write (+execute when
+ * requested).
+ *
+ * @returns VINF_SUCCESS or VERR_NO_MEMORY.
+ * @param   pMemNetBSD      The object; pglist and Core.pv are filled on
+ *                          success, and u.Cont.Phys too when fContiguous.
+ * @param   cb              Number of bytes to allocate.
+ * @param   fExecutable     Whether the mapping must be executable.
+ * @param   VmPhysAddrHigh  Highest acceptable physical address.
+ * @param   fContiguous     Whether a single physically contiguous segment
+ *                          is required (nsegs = 1).
+ */
+static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
+                                       paddr_t VmPhysAddrHigh, bool fContiguous)
+{
+    /* Virtual space first */
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+        return VERR_NO_MEMORY;
+
+    struct pglist *rlist = &pMemNetBSD->pglist;
+
+    int nsegs = fContiguous ? 1 : INT_MAX;
+
+    /* Physical pages */
+    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
+                        PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
+    {
+        /* Undo the VA reservation before failing. */
+        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
+        return VERR_NO_MEMORY;
+    }
+
+    /* Map */
+    struct vm_page *page;
+    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
+    if (fExecutable)
+        prot |= VM_PROT_EXECUTE;
+    vaddr_t virt2 = virt;
+    TAILQ_FOREACH(page, rlist, pageq.queue)
+    {
+        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
+        virt2 += PAGE_SIZE;
+    }
+
+    pMemNetBSD->Core.pv = (void *)virt;
+    if (fContiguous)
+    {
+        /* With nsegs = 1 the first page's address is the segment base. */
+        page = TAILQ_FIRST(rlist);
+        pMemNetBSD->Core.u.Cont.Phys = VM_PAGE_TO_PHYS(page);
+    }
+    return VINF_SUCCESS;
+}
+
+/**
+ * Allocates page-aligned, pageable-kernel memory (RTR0MEMOBJTYPE_PAGE).
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the new memory object.
+ * @param   cb          Number of bytes to allocate.
+ * @param   fExecutable Whether the memory must be executable.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                   RTR0MEMOBJTYPE_PAGE, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    void *pvMem = kmem_alloc(cb, KM_SLEEP);
+    if (RT_UNLIKELY(!pvMem))
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_PAGE_MEMORY;
+    }
+    if (fExecutable)
+    {
+        /* kmem_alloc gives RW memory; widen the protection to RWX. */
+        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
+                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+    }
+
+    pMemNetBSD->Core.pv = pvMem;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates memory below 4 GiB physical (RTR0MEMOBJTYPE_LOW); the pages
+ * need not be physically contiguous.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                   RTR0MEMOBJTYPE_LOW, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
+    if (rc)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return rc;
+    }
+
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates physically contiguous memory below 4 GiB
+ * (RTR0MEMOBJTYPE_CONT); u.Cont.Phys is set by the helper.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                   RTR0MEMOBJTYPE_CONT, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
+    if (rc)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return rc;
+    }
+
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for the PHYS/PHYS_NC allocators: allocates physical pages without
+ * mapping them anywhere.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the new memory object.
+ * @param   enmType     RTR0MEMOBJTYPE_PHYS or RTR0MEMOBJTYPE_PHYS_NC.
+ * @param   cb          Number of bytes to allocate.
+ * @param   PhysHighest Highest acceptable physical address, or NIL_RTHCPHYS
+ *                      for no restriction.
+ * @param   uAlignment  Physical alignment of the allocation.
+ * @param   fContiguous Whether a single contiguous segment is required.
+ */
+static int rtR0MemObjNetBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
+                                          size_t cb,
+                                          RTHCPHYS PhysHighest, size_t uAlignment,
+                                          bool fContiguous)
+{
+    paddr_t VmPhysAddrHigh;
+
+    /* create the object. */
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
+                                                                   enmType, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    if (PhysHighest != NIL_RTHCPHYS)
+        VmPhysAddrHigh = PhysHighest;
+    else
+        VmPhysAddrHigh = ~(paddr_t)0;
+
+    int nsegs = fContiguous ? 1 : INT_MAX;
+
+    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
+    if (error)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    if (fContiguous)
+    {
+        /* Only the PHYS type records a base address; PHYS_NC pages are
+           looked up individually via rtR0MemObjNativeGetPagePhysAddr. */
+        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
+        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
+        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
+        pMemNetBSD->Core.u.Phys.fAllocated = true;
+    }
+    *ppMem = &pMemNetBSD->Core;
+
+    return VINF_SUCCESS;
+}
+
+
+/** Allocates contiguous physical pages (no mapping); see the worker above. */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
+}
+
+
+/** Allocates non-contiguous physical pages (no mapping); page alignment. */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
+}
+
+
+/**
+ * Wraps a pre-existing physical address range in a memory object without
+ * allocating or mapping anything.
+ *
+ * @returns IPRT status code; only RTMEM_CACHE_POLICY_DONT_CARE is accepted.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
+
+    /* create the object. */
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    /* there is no allocation here, it needs to be mapped somewhere first. */
+    pMemNetBSD->Core.u.Phys.fAllocated = false;
+    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
+    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
+    /* Empty page list so rtR0MemObjNativeFree's uvm_pglistfree is a no-op. */
+    TAILQ_INIT(&pMemNetBSD->pglist);
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Wires down a user-process virtual range so it cannot be paged out.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the lock object.
+ * @param   R3Ptr       User virtual address to lock.
+ * @param   cb          Number of bytes.
+ * @param   fAccess     Requested access (not used by the UVM call here).
+ * @param   R0Process   The owning process; must not be NIL for this path.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    /* Wire the range (last args: new_pageable=0, lockflags=0). */
+    int rc = uvm_map_pageable(
+            &((struct proc *)R0Process)->p_vmspace->vm_map,
+            R3Ptr,
+            R3Ptr + cb,
+            0, 0);
+    if (rc)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * "Locks" kernel memory. No actual wiring is performed: the assumption is
+ * that all kernel memory used here is already wired.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+    /* Kernel memory (always?) wired; all memory allocated by vbox code is? */
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    /* NIL process marks this as a kernel lock (no unwire on free). */
+    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+    pMemNetBSD->Core.pv = pv;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+/**
+ * Reserves kernel virtual address space without backing it with memory.
+ *
+ * @returns IPRT status code; VERR_NOT_SUPPORTED if a fixed address
+ *          (pvFixed != -1) was requested.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+    if (pvFixed != (void *)-1)
+    {
+        /* can we support this? or can we assume the virtual space is already reserved? */
+        printf("reserve specified kernel virtual address not supported\n");
+        return VERR_NOT_SUPPORTED;
+    }
+
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    /* VA-only reservation; rtR0MemObjNativeFree releases it again. */
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+    pMemNetBSD->Core.pv = (void *)virt;
+    *ppMem = &pMemNetBSD->Core;
+    return VINF_SUCCESS;
+}
+
+
+/** Reserving user-process virtual space is not implemented on NetBSD. */
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
+{
+    printf("NativeReserveUser\n");
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Maps (part of) a PHYS/PHYS_NC memory object into kernel space.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to return the mapping object.
+ * @param   pMemToMap   The object to map; must be PHYS or PHYS_NC.
+ * @param   pvFixed     Fixed address request; only (void *)-1 (any) works.
+ * @param   uAlignment  Alignment of the kernel virtual address.
+ * @param   fProt       RTMEM_PROT_* flags for the mapping.
+ * @param   offSub      Byte offset of the sub-range to map (page granular).
+ * @param   cbSub       Size of the sub-range, or 0 to map everything.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+                                          unsigned fProt, size_t offSub, size_t cbSub)
+{
+    if (pvFixed != (void *)-1)
+    {
+        /* can we support this? or can we assume the virtual space is already reserved? */
+        printf("map to specified kernel virtual address not supported\n");
+        return VERR_NOT_SUPPORTED;
+    }
+
+    PRTR0MEMOBJNETBSD pMemNetBSD0 = (PRTR0MEMOBJNETBSD)pMemToMap;
+    if ((pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS)
+            && (pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC))
+    {
+        printf("memory to map is not physical\n");
+        return VERR_NOT_SUPPORTED;
+    }
+    size_t sz = cbSub > 0 ? cbSub : pMemNetBSD0->Core.cb;
+
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz);
+    /* Check the allocation before using it (the failure path below would
+       otherwise dereference NULL via rtR0MemObjDelete). */
+    if (!pMemNetBSD)
+        return VERR_NO_MEMORY;
+
+    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
+                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt == 0)
+    {
+        rtR0MemObjDelete(&pMemNetBSD->Core);
+        return VERR_NO_MEMORY;
+    }
+
+    /* Translate RTMEM_PROT_* into UVM protection bits. */
+    vm_prot_t prot = 0;
+
+    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
+        prot |= VM_PROT_READ;
+    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
+        prot |= VM_PROT_WRITE;
+    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
+        prot |= VM_PROT_EXECUTE;
+
+    /* Walk the page list, entering mappings only for pages that fall
+       inside [offSub, offSub + cbSub). */
+    struct vm_page *page;
+    vaddr_t virt2 = virt;
+    size_t map_pos = 0;
+    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
+    {
+        if (map_pos >= offSub)
+        {
+            if (cbSub > 0 && (map_pos >= offSub + cbSub))
+                break;
+
+            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
+            virt2 += PAGE_SIZE;
+        }
+        map_pos += PAGE_SIZE;
+    }
+
+    pMemNetBSD->Core.pv = (void *)virt;
+    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+    *ppMem = &pMemNetBSD->Core;
+
+    return VINF_SUCCESS;
+}
+
+
+/** Mapping objects into a user process is not implemented on NetBSD. */
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
+                                        unsigned fProt, RTR0PROCESS R0Process)
+{
+    printf("NativeMapUser\n");
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Changes the protection of a sub-range of a memory object.
+ *
+ * @returns VINF_SUCCESS, or VERR_NOT_SUPPORTED when the object has no map
+ *          or uvm_map_protect fails.
+ * @param   pMem    The memory object.
+ * @param   offSub  Byte offset into the object.
+ * @param   cbSub   Number of bytes to re-protect.
+ * @param   fProt   New RTMEM_PROT_* protection.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+    vm_prot_t ProtectionFlags = 0;
+    vaddr_t AddrStart = (vaddr_t)pMem->pv + offSub;
+    vm_map_t pVmMap = rtR0MemObjNetBSDGetMap(pMem);
+
+    if (!pVmMap)
+        return VERR_NOT_SUPPORTED;
+
+    /* Translate RTMEM_PROT_* into UVM protection bits. */
+    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
+        ProtectionFlags |= UVM_PROT_R;
+    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
+        ProtectionFlags |= UVM_PROT_W;
+    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
+        ProtectionFlags |= UVM_PROT_X;
+
+    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
+                                ProtectionFlags, 0);
+    if (!error)
+        return VINF_SUCCESS;
+
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Gets the physical address of page @a iPage within a memory object.
+ *
+ * @returns Physical address, or NIL_RTHCPHYS when it cannot be determined.
+ * @param   pMem    The memory object.
+ * @param   iPage   Zero-based page index into the object.
+ */
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;
+
+    switch (pMemNetBSD->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        {
+            /* Resolve through the kernel pmap. */
+            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
+            paddr_t pa = 0;
+            pmap_extract(pmap_kernel(), va, &pa);
+            return pa;
+        }
+        case RTR0MEMOBJTYPE_CONT:
+            return pMemNetBSD->Core.u.Cont.Phys + ptoa(iPage);
+        case RTR0MEMOBJTYPE_PHYS:
+            return pMemNetBSD->Core.u.Phys.PhysBase + ptoa(iPage);
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            /* Non-contiguous: walk the page list to the iPage'th entry. */
+            struct vm_page *page;
+            size_t i = 0;
+            TAILQ_FOREACH(page, &pMemNetBSD->pglist, pageq.queue)
+            {
+                if (i == iPage)
+                    break;
+                i++;
+            }
+            /* If iPage is beyond the list, TAILQ_FOREACH terminates with
+               page == NULL; don't feed that to VM_PAGE_TO_PHYS. */
+            if (!page)
+                return NIL_RTHCPHYS;
+            return VM_PAGE_TO_PHYS(page);
+        }
+        case RTR0MEMOBJTYPE_LOCK:
+        case RTR0MEMOBJTYPE_MAPPING:
+        {
+            /* Pick the pmap of the owning process (or the kernel). */
+            pmap_t pmap;
+            if (pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
+                pmap = pmap_kernel();
+            else
+                pmap = ((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map.pmap;
+            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
+            paddr_t pa = 0;
+            pmap_extract(pmap, va, &pa);
+            return pa;
+        }
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return NIL_RTHCPHYS;
+        default:
+            return NIL_RTHCPHYS;
+    }
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/memuserkernel-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/memuserkernel-r0drv-netbsd.c
new file mode 100644
index 00000000..cf67730d
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/memuserkernel-r0drv-netbsd.c
@@ -0,0 +1,82 @@
+/* $Id: memuserkernel-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/mem.h>
+#include <iprt/errcore.h>
+
+
+/**
+ * Copies memory from user space into a kernel buffer via copyin().
+ *
+ * @returns VINF_SUCCESS or VERR_ACCESS_DENIED on any copyin failure.
+ */
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+    int rc = copyin((const void *)R3PtrSrc, pvDst, cb);
+    if (RT_LIKELY(rc == 0))
+        return VINF_SUCCESS;
+    return VERR_ACCESS_DENIED;
+}
+
+
+/**
+ * Copies memory from a kernel buffer out to user space via copyout().
+ *
+ * @returns VINF_SUCCESS or VERR_ACCESS_DENIED on any copyout failure.
+ */
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+    int rc = copyout(pvSrc, (void *)R3PtrDst, cb);
+    if (RT_LIKELY(rc == 0))
+        return VINF_SUCCESS;
+    return VERR_ACCESS_DENIED;
+}
+
+
+/** Checks whether @a R3Ptr lies below the user/kernel address split. */
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
+    return R3Ptr < VM_MAXUSER_ADDRESS;
+}
+
+
+/** Checks whether @a pv lies at or above the user/kernel address split. */
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+    return (uintptr_t)pv >= VM_MAXUSER_ADDRESS;
+}
+
+
+/** NetBSD keeps kernel and user address ranges disjoint; always true. */
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+    return true;
+}
+
+
+/** Fault-safe kernel-to-kernel copy; not implemented on NetBSD. */
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/** Fault-safe kernel-to-kernel copy; not implemented on NetBSD. */
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+    return VERR_NOT_SUPPORTED;
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/mp-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/mp-r0drv-netbsd.c
new file mode 100644
index 00000000..29472496
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/mp-r0drv-netbsd.c
@@ -0,0 +1,43 @@
+/* $Id: mp-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/mp.h>
+#include <iprt/errcore.h>
+#include <iprt/asm.h>
+#include <iprt/cpuset.h>
+#include "r0drv/mp-r0drv.h"
+
+
+/** Returns the current CPU's number as the IPRT CPU identifier. */
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+    return cpu_number();
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/process-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/process-r0drv-netbsd.c
new file mode 100644
index 00000000..5b58bfe2
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/process-r0drv-netbsd.c
@@ -0,0 +1,51 @@
+/* $Id: process-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Process Management, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/process.h>
+
+
+/** Returns the PID of the current process. */
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+    struct proc *pSelf = curproc;
+    return pSelf->p_pid;
+}
+
+
+/** Returns the current struct proc pointer as the ring-0 process handle. */
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+    return (RTR0PROCESS)curproc;
+}
+
diff --git a/src/VBox/Runtime/r0drv/netbsd/semevent-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/semevent-r0drv-netbsd.c
new file mode 100644
index 00000000..716e5396
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/semevent-r0drv-netbsd.c
@@ -0,0 +1,255 @@
+/* $Id: semevent-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+
+#include "sleepqueue-r0drv-netbsd.h"
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * NetBSD event semaphore.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The object status - !0 when signaled and 0 when reset. */
+ uint32_t volatile fState;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+ return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+ AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAllocZ(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->cRefs = 1;
+ pThis->fState = 0;
+
+ *phEventSem = pThis;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Retains a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventBsdRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs < 100000); NOREF(cRefs);
+}
+
+
+/**
+ * Releases a reference to the event semaphore.
+ *
+ * @param pThis The event semaphore.
+ */
+DECLINLINE(void) rtR0SemEventBsdRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ RTMemFree(pThis);
+}
+
+
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = hEventSem;
+ if (pThis == NIL_RTSEMEVENT)
+ return VINF_SUCCESS;
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs > 0);
+
+ /*
+ * Invalidate it and signal the object just in case.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
+ ASMAtomicWriteU32(&pThis->fState, 0);
+ rtR0SemBsdBroadcast(pThis);
+ rtR0SemEventBsdRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ rtR0SemEventBsdRetain(pThis);
+
+ /*
+ * Signal the event object.
+ */
+ ASMAtomicWriteU32(&pThis->fState, 1);
+ rtR0SemBsdSignal(pThis);
+ rtR0SemEventBsdRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ int rc;
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventBsdRetain(pThis);
+
+ /*
+ * Try grab the event without setting up the wait.
+ */
+ if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMBSDSLEEP Wait;
+ rc = rtR0SemBsdWaitInit(&Wait, fFlags, uTimeout, pThis);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ rtR0SemBsdWaitPrepare(&Wait);
+
+ /* Check the exit conditions. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
+ rc = VINF_SUCCESS;
+ else if (rtR0SemBsdWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemBsdWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemBsdWaitDoIt(&Wait);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemBsdWaitDelete(&Wait);
+ }
+ }
+
+ rtR0SemEventBsdRelease(pThis);
+ return rc;
+}
+
+
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitEx);
+
+
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventWaitExDebug);
+
+
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+ return 1000000000 / hz;
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/semeventmulti-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/semeventmulti-r0drv-netbsd.c
new file mode 100644
index 00000000..48c2cec8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/semeventmulti-r0drv-netbsd.c
@@ -0,0 +1,319 @@
+/* $Id: semeventmulti-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/lockvalidator.h>
+
+#include "sleepqueue-r0drv-netbsd.h"
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name fStateAndGen values
+ * @{ */
+/** The state bit number. */
+#define RTSEMEVENTMULTIBSD_STATE_BIT 0
+/** The state mask. */
+#define RTSEMEVENTMULTIBSD_STATE_MASK RT_BIT_32(RTSEMEVENTMULTIBSD_STATE_BIT)
+/** The generation mask. */
+#define RTSEMEVENTMULTIBSD_GEN_MASK ~RTSEMEVENTMULTIBSD_STATE_MASK
+/** The generation shift. */
+#define RTSEMEVENTMULTIBSD_GEN_SHIFT 1
+/** The initial variable value. */
+#define RTSEMEVENTMULTIBSD_STATE_GEN_INIT UINT32_C(0xfffffffc)
+/** @} */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * NetBSD multiple release event semaphore.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+ /** Magic value (RTSEMEVENTMULTI_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The object state bit and generation counter.
+ * The generation counter is incremented every time the object is
+ * signalled. */
+ uint32_t volatile fStateAndGen;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+ return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+ const char *pszNameFmt, ...)
+{
+ PRTSEMEVENTMULTIINTERNAL pThis;
+
+ AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+ pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMEVENTMULTI_MAGIC;
+ pThis->fStateAndGen = RTSEMEVENTMULTIBSD_STATE_GEN_INIT;
+ pThis->cRefs = 1;
+
+ *phEventMultiSem = pThis;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiBsdRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventMultiBsdRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ {
+ Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC);
+ RTMemFree(pThis);
+ }
+}
+
+
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (pThis == NIL_RTSEMEVENTMULTI)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ Assert(pThis->cRefs > 0);
+
+ /*
+ * Invalidate it and signal the object just in case.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC);
+ ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIBSD_GEN_MASK);
+ rtR0SemBsdBroadcast(pThis);
+ rtR0SemEventMultiBsdRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+ uint32_t fNew;
+ uint32_t fOld;
+
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiBsdRetain(pThis);
+
+ /*
+     * Signal the event object. The cause of the paranoia here is trying to
+ * deal with racing RTSemEventMultiSignal calls (should probably be
+ * forbidden, but it's relatively easy to handle).
+ */
+ do
+ {
+ fNew = fOld = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ fNew += 1 << RTSEMEVENTMULTIBSD_GEN_SHIFT;
+ fNew |= RTSEMEVENTMULTIBSD_STATE_MASK;
+ }
+ while (!ASMAtomicCmpXchgU32(&pThis->fStateAndGen, fNew, fOld));
+
+ rtR0SemBsdBroadcast(pThis);
+ rtR0SemEventMultiBsdRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiBsdRetain(pThis);
+
+ /*
+ * Reset it.
+ */
+ ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTIBSD_STATE_MASK);
+
+ rtR0SemEventMultiBsdRelease(pThis);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventMultiWaitEx.
+ * @param uTimeout See RTSemEventMultiWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventMultiBsdWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ uint32_t fOrgStateAndGen;
+ int rc;
+
+ /*
+ * Validate the input.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+ rtR0SemEventMultiBsdRetain(pThis);
+
+ /*
+ * Is the event already signalled or do we have to wait?
+ */
+ fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+ if (fOrgStateAndGen & RTSEMEVENTMULTIBSD_STATE_MASK)
+ rc = VINF_SUCCESS;
+ else
+ {
+ /*
+ * We have to wait.
+ */
+ RTR0SEMBSDSLEEP Wait;
+ rc = rtR0SemBsdWaitInit(&Wait, fFlags, uTimeout, pThis);
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ /* The destruction test. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else
+ {
+ rtR0SemBsdWaitPrepare(&Wait);
+
+ /* Check the exit conditions. */
+ if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+ rc = VERR_SEM_DESTROYED;
+ else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
+ rc = VINF_SUCCESS;
+ else if (rtR0SemBsdWaitHasTimedOut(&Wait))
+ rc = VERR_TIMEOUT;
+ else if (rtR0SemBsdWaitWasInterrupted(&Wait))
+ rc = VERR_INTERRUPTED;
+ else
+ {
+ /* Do the wait and then recheck the conditions. */
+ rtR0SemBsdWaitDoIt(&Wait);
+ continue;
+ }
+ }
+ break;
+ }
+
+ rtR0SemBsdWaitDelete(&Wait);
+ }
+ }
+
+ rtR0SemEventMultiBsdRelease(pThis);
+ return rc;
+}
+
+
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventMultiBsdWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventMultiBsdWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitEx);
+
+
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventMultiBsdWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiWaitExDebug);
+
+
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+ return rtR0SemBsdWaitGetResolution();
+}
+RT_EXPORT_SYMBOL(RTSemEventMultiGetResolution);
diff --git a/src/VBox/Runtime/r0drv/netbsd/semfastmutex-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/semfastmutex-r0drv-netbsd.c
new file mode 100644
index 00000000..8327ae4b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/semfastmutex-r0drv-netbsd.c
@@ -0,0 +1,114 @@
+/* $Id: semfastmutex-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/errcore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the NetBSD (sleep) mutex.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+ /** Magic value (RTSEMFASTMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** The NetBSD shared/exclusive lock mutex. */
+ krwlock_t Mtx;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+ AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+ AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+
+ PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+ rw_init(&pThis->Mtx);
+
+ *phFastMtx = pThis;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ if (pThis == NIL_RTSEMFASTMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+ rw_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ rw_enter(&pThis->Mtx, RW_WRITER);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ rw_exit(&pThis->Mtx);
+ return VINF_SUCCESS;
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/semmutex-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/semmutex-r0drv-netbsd.c
new file mode 100644
index 00000000..e815ae6c
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/semmutex-r0drv-netbsd.c
@@ -0,0 +1,218 @@
+/* $Id: semmutex-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (C) 2010-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the NetBSD (sleep) mutex.
+ */
+typedef struct RTSEMMUTEXINTERNAL
+{
+ /** Magic value (RTSEMMUTEX_MAGIC). */
+ uint32_t u32Magic;
+    /** The shared/exclusive lock (FreeBSD sx(9) API — TODO: port to NetBSD). */
+ struct sx SxLock;
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMutexSem)
+{
+ AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
+ AssertPtrReturn(phMutexSem, VERR_INVALID_POINTER);
+
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMMUTEX_MAGIC;
+ sx_init_flags(&pThis->SxLock, "IPRT Mutex Semaphore", SX_RECURSE);
+
+ *phMutexSem = pThis;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ if (pThis == NIL_RTSEMMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
+
+ sx_destroy(&pThis->SxLock);
+ RTMemFree(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ int rc;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ if (cMillies == RT_INDEFINITE_WAIT)
+ {
+ sx_xlock(&pThis->SxLock);
+ rc = VINF_SUCCESS;
+ }
+ else if (!cMillies)
+ {
+ if (sx_try_xlock(&pThis->SxLock))
+ rc = VINF_SUCCESS;
+ else
+ rc = VERR_TIMEOUT;
+ }
+ /*
+ * GROSS HACK: poll implementation of timeout.
+ */
+ /** @todo Implement timeouts in RTSemMutexRequest. */
+ else if (sx_try_xlock(&pThis->SxLock))
+ rc = VINF_SUCCESS;
+ else
+ {
+ uint64_t StartTS = RTTimeSystemMilliTS();
+ rc = VERR_TIMEOUT;
+ do
+ {
+ RTThreadSleep(1);
+ if (sx_try_xlock(&pThis->SxLock))
+ {
+ rc = VINF_SUCCESS;
+ break;
+ }
+ } while (RTTimeSystemMilliTS() - StartTS < cMillies);
+ }
+
+ return rc;
+}
+
+
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ int rc;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ if (cMillies == RT_INDEFINITE_WAIT)
+ {
+ if (!sx_xlock_sig(&pThis->SxLock))
+ rc = VINF_SUCCESS;
+ else
+ rc = VERR_INTERRUPTED;
+ }
+ else if (!cMillies)
+ {
+ if (sx_try_xlock(&pThis->SxLock))
+ rc = VINF_SUCCESS;
+ else
+ rc = VERR_TIMEOUT;
+ }
+ /*
+ * GROSS HACK: poll implementation of timeout.
+ */
+ /** @todo Implement timeouts and interrupt checks in
+ * RTSemMutexRequestNoResume. */
+ else if (sx_try_xlock(&pThis->SxLock))
+ rc = VINF_SUCCESS;
+ else
+ {
+ uint64_t StartTS = RTTimeSystemMilliTS();
+ rc = VERR_TIMEOUT;
+ do
+ {
+ RTThreadSleep(1);
+ if (sx_try_xlock(&pThis->SxLock))
+ {
+ rc = VINF_SUCCESS;
+ break;
+ }
+ } while (RTTimeSystemMilliTS() - StartTS < cMillies);
+ }
+
+ return rc;
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ sx_xunlock(&pThis->SxLock);
+ return VINF_SUCCESS;
+}
+
+
+
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ AssertPtrReturn(pThis, false);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), false);
+
+ return sx_xlocked(&pThis->SxLock);
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/sleepqueue-r0drv-netbsd.h b/src/VBox/Runtime/r0drv/netbsd/sleepqueue-r0drv-netbsd.h
new file mode 100644
index 00000000..2a153786
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/sleepqueue-r0drv-netbsd.h
@@ -0,0 +1,281 @@
+/* $Id: sleepqueue-r0drv-netbsd.h $ */
+/** @file
+ * IPRT - NetBSD Ring-0 Driver Helpers for Abstracting Sleep Queues.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_netbsd_sleepqueue_r0drv_netbsd_h
+#define IPRT_INCLUDED_SRC_r0drv_netbsd_sleepqueue_r0drv_netbsd_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "the-netbsd-kernel.h"
+
+#include <iprt/asm-math.h>
+#include <iprt/err.h>
+#include <iprt/time.h>
+
+/* Sync-object ops vector passed to sleepq_enqueue(): FIFO wakeup order with
+   the stock sleepq unsleep/priority hooks and no owner (this is not a lock). */
+static syncobj_t vbox_syncobj = {
+    SOBJ_SLEEPQ_FIFO,
+    sleepq_unsleep,
+    sleepq_changepri,
+    sleepq_lendpri,
+    syncobj_noowner,
+};
+
+/**
+ * Kernel mode NetBSD wait state structure.
+ */
+typedef struct RTR0SEMBSDSLEEP
+{
+    /** The absolute timeout given as nano seconds since the start of the
+     * RTTimeNanoTS() clock.  (NOTE(review): nanotime() based, i.e. wall
+     * clock — confirm monotonicity assumptions of callers.) */
+    uint64_t uNsAbsTimeout;
+    /** The timeout in ticks. Updated after waiting. */
+    int iTimeout;
+    /** Set if it's an indefinite wait. */
+    bool fIndefinite;
+    /** Set if we've already timed out.
+     * Set by rtR0SemBsdWaitDoIt and read by rtR0SemBsdWaitHasTimedOut. */
+    bool fTimedOut;
+    /** Flag whether the wait was interrupted (set by rtR0SemBsdWaitDoIt). */
+    bool fInterrupted;
+    /** Flag whether the wait is interruptible or not. */
+    bool fInterruptible;
+    /** Opaque wait channel id. */
+    wchan_t wchan;
+    /** Sleep queue for wchan; set by rtR0SemBsdWaitPrepare, consumed by DoIt. */
+    sleepq_t *sq;
+    /** Spin mutex guarding sq; held between Prepare and DoIt/Delete. */
+    kmutex_t *sq_lock;
+} RTR0SEMBSDSLEEP;
+/** Pointer to a NetBSD wait state. */
+typedef RTR0SEMBSDSLEEP *PRTR0SEMBSDSLEEP;
+
+
+/**
+ * Recomputes the tick-based timeout (pWait->iTimeout) from the absolute
+ * nanosecond deadline (pWait->uNsAbsTimeout).
+ *
+ * Sets pWait->iTimeout to 0 when the deadline has already passed; the callers
+ * treat that as VERR_TIMEOUT.
+ *
+ * @param   pWait               The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitUpdateTimeout(PRTR0SEMBSDSLEEP pWait)
+{
+    /* Convert the absolute deadline into a tick count for sleepq_block(). */
+    uint64_t now = RTTimeNanoTS();
+    if (now >= pWait->uNsAbsTimeout) {
+        pWait->iTimeout = 0;
+    } else {
+        uint64_t nanos = pWait->uNsAbsTimeout - now;
+        uint64_t cTicks;
+        /* Guard the hz*nanos multiplication and the narrowing to int:
+           a multi-year deadline previously overflowed and could yield a
+           bogus (even negative) tick count. */
+        if (nanos > UINT64_MAX / (uint64_t)hz)
+            cTicks = UINT64_MAX;
+        else
+            cTicks = (uint64_t)hz * nanos / 1000000000;
+        pWait->iTimeout = cTicks > (uint64_t)INT_MAX ? INT_MAX : (int)cTicks;
+        /* Round a sub-tick wait of at least 1ms up to one tick so callers do
+           not see a premature VERR_TIMEOUT. */
+        if ((pWait->iTimeout == 0) && (nanos >= 1000000)) {
+            pWait->iTimeout = 1;
+        }
+    }
+}
+
+/**
+ * Initializes a wait.
+ *
+ * The caller MUST check the wait condition BEFORE calling this function or the
+ * timeout logic will be flawed.
+ *
+ * @returns VINF_SUCCESS, or VERR_TIMEOUT if the timeout has already expired.
+ * @param   pWait       The wait structure.
+ * @param   fFlags      The wait flags (RTSEMWAIT_FLAGS_XXX).
+ * @param   uTimeout    The timeout; milliseconds or nanoseconds, relative or
+ *                      absolute, depending on fFlags.
+ * @param   pvWaitChan  The opaque wait channel.
+ */
+DECLINLINE(int) rtR0SemBsdWaitInit(PRTR0SEMBSDSLEEP pWait, uint32_t fFlags, uint64_t uTimeout,
+                                   void *pvWaitChan)
+{
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE) {
+        pWait->fIndefinite = true;
+        pWait->iTimeout = 0;
+        pWait->uNsAbsTimeout = 0;
+    } else {
+        /* Bugfix: fIndefinite was previously left uninitialized on this path
+           even though rtR0SemBsdWaitDoIt reads it after sleepq_block(). */
+        pWait->fIndefinite = false;
+        if (fFlags & RTSEMWAIT_FLAGS_RELATIVE) {
+            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS) {
+                /* NOTE(review): overflows for timeouts beyond ~584 years. */
+                pWait->uNsAbsTimeout = uTimeout * 1000000 + RTTimeSystemNanoTS();
+            } else {
+                pWait->uNsAbsTimeout = uTimeout + RTTimeSystemNanoTS();
+            }
+        } else {
+            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS) {
+                pWait->uNsAbsTimeout = uTimeout * 1000000;
+            } else {
+                pWait->uNsAbsTimeout = uTimeout;
+            }
+        }
+        rtR0SemBsdWaitUpdateTimeout(pWait);
+        if (pWait->iTimeout == 0) {
+            return VERR_TIMEOUT;
+        }
+    }
+
+    pWait->fTimedOut = false;
+    /*
+     * Initialize the wait queue related bits.
+     */
+    pWait->fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE
+                          ? true : false;
+    pWait->fInterrupted = false;
+    pWait->wchan = pvWaitChan;
+    pWait->sq = NULL;
+    pWait->sq_lock = NULL;
+
+    return VINF_SUCCESS;
+}
+
+/**
+ * Prepares the next wait.
+ *
+ * This must be called before rtR0SemBsdWaitDoIt, and the caller should check
+ * the exit conditions inbetween the two calls.
+ *
+ * Looks up the sleep queue for our wait channel in the kernel's global sleep
+ * table; on return pWait->sq_lock refers to the queue's spin mutex, which is
+ * held until rtR0SemBsdWaitDoIt or rtR0SemBsdWaitDelete releases it.
+ *
+ * @param   pWait               The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitPrepare(PRTR0SEMBSDSLEEP pWait)
+{
+    pWait->sq = sleeptab_lookup(&sleeptab, pWait->wchan, &pWait->sq_lock);
+}
+
+/**
+ * Do the actual wait.
+ *
+ * Enqueues the current LWP on the sleep queue looked up by
+ * rtR0SemBsdWaitPrepare and blocks, then translates the sleepq_block()
+ * error code into the fTimedOut / fInterrupted flags.
+ *
+ * @param   pWait               The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitDoIt(PRTR0SEMBSDSLEEP pWait)
+{
+    sleepq_enter(pWait->sq, curlwp, pWait->sq_lock);
+    sleepq_enqueue(pWait->sq, pWait->wchan, "VBoxIS", &vbox_syncobj);
+
+    /* sleepq_block consumes the queue + lock; forget them so WaitDelete
+       doesn't try to unlock again. */
+    pWait->sq = NULL;
+    pWait->sq_lock = NULL;
+
+    int error = sleepq_block(pWait->iTimeout, pWait->fInterruptible);
+    if (error == EWOULDBLOCK) {
+        if (!pWait->fIndefinite) {
+            pWait->fTimedOut = true;
+        }
+    } else if (error == ERESTART || error == EINTR) {
+        /* Signal delivery; the two codes were handled by duplicated branches
+           before — folded into one. */
+        if (pWait->fInterruptible) {
+            pWait->fInterrupted = true;
+        } else if (!pWait->fIndefinite) {
+            /* Not interruptible: recompute the remaining timeout and flag a
+               timeout if the deadline passed while we were woken up. */
+            rtR0SemBsdWaitUpdateTimeout(pWait);
+            if (pWait->iTimeout == 0) {
+                pWait->fTimedOut = true;
+            }
+        }
+    } else if (error) {
+        AssertMsgFailed(("sleepq_block -> %d\n", error));
+    }
+}
+
+
+/**
+ * Checks if a NetBSD wait was interrupted.
+ *
+ * @returns true / false
+ * @param   pWait               The wait structure.
+ * @remarks fInterrupted is set by rtR0SemBsdWaitDoIt(), so this is only
+ *          meaningful after at least one rtR0SemBsdWaitDoIt() call.
+ */
+DECLINLINE(bool) rtR0SemBsdWaitWasInterrupted(PRTR0SEMBSDSLEEP pWait)
+{
+    return pWait->fInterrupted;
+}
+
+
+/**
+ * Checks if a NetBSD wait has timed out.
+ *
+ * @returns true / false
+ * @param   pWait               The wait structure.
+ * @remarks fTimedOut is set by rtR0SemBsdWaitDoIt().
+ */
+DECLINLINE(bool) rtR0SemBsdWaitHasTimedOut(PRTR0SEMBSDSLEEP pWait)
+{
+    return pWait->fTimedOut;
+}
+
+
+/**
+ * Deletes a NetBSD wait.
+ *
+ * Drops the sleep-queue spin lock if it is still held, i.e. when
+ * rtR0SemBsdWaitPrepare was called but rtR0SemBsdWaitDoIt (which consumes
+ * sq/sq_lock) was not.
+ *
+ * @param   pWait               The wait structure.
+ */
+DECLINLINE(void) rtR0SemBsdWaitDelete(PRTR0SEMBSDSLEEP pWait)
+{
+    if (pWait->sq_lock != NULL) {
+        mutex_spin_exit(pWait->sq_lock);
+        pWait->sq = NULL;
+        pWait->sq_lock = NULL;
+    }
+}
+
+
+/**
+ * Signals the wait channel, waking at most one waiter.
+ *
+ * @param   pvWaitChan          The opaque wait channel handle.
+ */
+DECLINLINE(void) rtR0SemBsdSignal(void *pvWaitChan)
+{
+    kmutex_t *mp;
+    sleepq_t *sq = sleeptab_lookup(&sleeptab, pvWaitChan, &mp);
+    /* sleepq_wake releases the lock returned by sleeptab_lookup. */
+    sleepq_wake(sq, pvWaitChan, 1, mp);
+}
+
+/**
+ * Wakes up all waiters on the wait channel (~0u = unlimited expected count).
+ *
+ * @param   pvWaitChan          The opaque wait channel handle.
+ */
+DECLINLINE(void) rtR0SemBsdBroadcast(void *pvWaitChan)
+{
+    kmutex_t *mp;
+    sleepq_t *sq = sleeptab_lookup(&sleeptab, pvWaitChan, &mp);
+    /* sleepq_wake releases the lock returned by sleeptab_lookup. */
+    sleepq_wake(sq, pvWaitChan, ~0u, mp);
+}
+
+/**
+ * Gets the max resolution of the timeout machinery.
+ *
+ * @returns Resolution specified in nanoseconds (the length of one clock
+ *          tick, since the wait is implemented on tick-based sleepq_block).
+ */
+DECLINLINE(uint32_t) rtR0SemBsdWaitGetResolution(void)
+{
+    return 1000000000 / hz; /* ns per tick */
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_netbsd_sleepqueue_r0drv_netbsd_h */
diff --git a/src/VBox/Runtime/r0drv/netbsd/spinlock-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/spinlock-r0drv-netbsd.c
new file mode 100644
index 00000000..44c6714d
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/spinlock-r0drv-netbsd.c
@@ -0,0 +1,148 @@
+/* $Id: spinlock-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/spinlock.h>
+#include <iprt/errcore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/thread.h>
+#include <iprt/mp.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper around the NetBSD kmutex_t type.
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+    /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+    uint32_t volatile u32Magic;
+    /** The spinlock (a kmutex_t by value, despite the 'p' prefix). */
+    kmutex_t pSpinLock;
+    /** Saved interrupt flag.  NOTE(review): only ever initialized to zero in
+     *  this port, never consumed — kept for layout/API symmetry? */
+    uint32_t volatile fIntSaved;
+    /** The spinlock creation flags (RTSPINLOCK_FLAGS_INTERRUPT_*). */
+    uint32_t fFlags;
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+/* Creates a spinlock.  pszName is accepted for API compatibility but unused
+   in this port.  Interrupt-safe locks are created as NetBSD spin mutexes by
+   passing a non-IPL_NONE level to mutex_init (NOTE(review): IPL_BIO caps the
+   blocked interrupt priority — confirm it is high enough for all users). */
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+
+    /*
+     * Allocate.
+     */
+    AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    /*
+     * Initialize & return.
+     */
+    pThis->u32Magic = RTSPINLOCK_MAGIC;
+    pThis->fFlags = fFlags;
+    pThis->fIntSaved = 0;
+    if (fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE) {
+        mutex_init(&pThis->pSpinLock, MUTEX_DEFAULT, IPL_BIO);
+    } else {
+        mutex_init(&pThis->pSpinLock, MUTEX_DEFAULT, IPL_NONE);
+    }
+
+    *pSpinlock = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/* Destroys a spinlock: invalidates the magic first (so concurrent misuse is
+   caught by the asserts in Acquire/Release), then tears down the kmutex and
+   frees the wrapper. */
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+    /*
+     * Validate input.
+     */
+    RT_ASSERT_INTS_ON();
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
+                    ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
+                    VERR_INVALID_PARAMETER);
+
+    /*
+     * Make the lock invalid and release the memory.
+     */
+    ASMAtomicIncU32(&pThis->u32Magic);
+    mutex_destroy(&pThis->pSpinLock);
+    RTMemFree(pThis);
+    return VINF_SUCCESS;
+}
+
+
+/* Acquires the spinlock.  Interrupt-safe locks use mutex_spin_enter (raises
+   the SPL to the lock's IPL); others take a regular adaptive mutex. */
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    /* Declares the CPU-id variable used by the RT_ASSERT_PREEMPT_CPUID
+       family of macros in strict builds. */
+    RT_ASSERT_PREEMPT_CPUID_VAR();
+    AssertPtr(pThis);
+    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE) {
+        mutex_spin_enter(&pThis->pSpinLock);
+    } else {
+        mutex_enter(&pThis->pSpinLock);
+    }
+}
+
+
+/* Releases the spinlock; must match the variant used by RTSpinlockAcquire
+   (spin vs. adaptive), which is derived from the same fFlags field. */
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+    AssertPtr(pThis);
+    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE) {
+        mutex_spin_exit(&pThis->pSpinLock);
+    } else {
+        mutex_exit(&pThis->pSpinLock);
+    }
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/the-netbsd-kernel.h b/src/VBox/Runtime/r0drv/netbsd/the-netbsd-kernel.h
new file mode 100644
index 00000000..8bc61190
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/the-netbsd-kernel.h
@@ -0,0 +1,75 @@
+/* $Id: the-netbsd-kernel.h $ */
+/** @file
+ * IPRT - Ring-0 Driver, The NetBSD Kernel Headers.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_netbsd_the_netbsd_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_netbsd_the_netbsd_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/types.h>
+
+/* Deal with conflicts first. */
+#include <sys/param.h>
+#undef PVM
+#include <sys/bus.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/kernel.h>
+#include <sys/uio.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/syslimits.h>
+#include <sys/sleepq.h>
+#include <sys/unistd.h>
+#include <sys/kthread.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sched.h>
+#include <sys/callout.h>
+#include <sys/rwlock.h>
+#include <sys/kmem.h>
+#include <sys/cpu.h>
+#include <sys/vmmeter.h> /* cnt */
+#include <sys/resourcevar.h>
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+#include <uvm/uvm_page.h>
+#include <machine/cpu.h>
+
+/**
+ * Check whether we can use kmem_alloc_prot.
+ */
+#if 0 /** @todo Not available yet. */
+# define USE_KMEM_ALLOC_PROT
+#endif
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_netbsd_the_netbsd_kernel_h */
diff --git a/src/VBox/Runtime/r0drv/netbsd/thread-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/thread-r0drv-netbsd.c
new file mode 100644
index 00000000..17f07e8a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/thread-r0drv-netbsd.c
@@ -0,0 +1,181 @@
+/* $Id: thread-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Threads (Part 1), Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (C) 2007-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mp.h>
+#include "internal/thread.h"
+
+
+/* The native thread handle on NetBSD is the current LWP pointer. */
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+    return (RTNATIVETHREAD)curlwp;
+}
+
+
+/**
+ * Common worker for RTThreadSleep and RTThreadSleepNoLog.
+ *
+ * @returns VINF_SUCCESS, VERR_TIMEOUT on normal expiry, VERR_INTERRUPTED on
+ *          signal delivery, VERR_NO_TRANSLATION for unexpected tsleep codes.
+ * @param   cMillies    Milliseconds to sleep; 0 yields, RT_INDEFINITE_WAIT
+ *                      sleeps without timeout.
+ */
+static int rtR0ThreadNbsdSleepCommon(RTMSINTERVAL cMillies)
+{
+    int rc;
+    int cTicks;
+
+    /*
+     * 0 ms sleep -> yield.
+     */
+    if (!cMillies)
+    {
+        RTThreadYield();
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Translate milliseconds into ticks and go to sleep.
+     */
+    if (cMillies != RT_INDEFINITE_WAIT)
+    {
+        /* Fast paths for the common hz values, generic 64-bit math with
+           saturation to INT_MAX otherwise. */
+        if (hz == 1000)
+            cTicks = cMillies;
+        else if (hz == 100)
+            cTicks = cMillies / 10;
+        else
+        {
+            int64_t cTicks64 = ((uint64_t)cMillies * hz) / 1000;
+            cTicks = (int)cTicks64;
+            if (cTicks != cTicks64)
+                cTicks = INT_MAX;
+        }
+    }
+    else
+        cTicks = 0; /* tsleep timo=0 means no timeout.  NOTE(review): the old
+                       "requires giant lock!" remark looked like a FreeBSD
+                       leftover — confirm for NetBSD. */
+
+    /* The address of RTThreadSleep serves as a unique, never-signalled
+       wait channel; PCATCH makes the sleep signal-interruptible. */
+    rc = tsleep((void *)RTThreadSleep,
+                PZERO | PCATCH,
+                "iprtsl", /* max 6 chars */
+                cTicks);
+    switch (rc)
+    {
+        case 0:
+            return VINF_SUCCESS;
+        case EWOULDBLOCK:
+            return VERR_TIMEOUT;
+        case EINTR:
+        case ERESTART:
+            return VERR_INTERRUPTED;
+        default:
+            AssertMsgFailed(("%d\n", rc));
+            return VERR_NO_TRANSLATION;
+    }
+}
+
+
+/* Public sleep API; defers to the common tsleep-based worker. */
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+    return rtR0ThreadNbsdSleepCommon(cMillies);
+}
+
+
+/* No-logging variant; identical to RTThreadSleep in this port since the
+   common worker does no logging. */
+RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
+{
+    return rtR0ThreadNbsdSleepCommon(cMillies);
+}
+
+
+/* Yields the CPU via the kernel's yield(); always reports that a yield
+   happened (the native call gives no feedback). */
+RTDECL(bool) RTThreadYield(void)
+{
+    yield();
+    return true;
+}
+
+
+/* Reports whether preemption is currently enabled for this thread.
+ *
+ * Fix: this used to test l_dopreempt, which only flags a *pending*
+ * preemption; the disable nesting depth that RTThreadPreemptDisable (below)
+ * increments is l_nopreempt, so a caller inside a Disable/Restore section
+ * was wrongly reported as preemptible. */
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD);
+
+    return curlwp->l_nopreempt == 0
+        && ASMIntAreEnabled(); /** @todo is there a native netbsd function/macro for this? */
+}
+
+
+/* Returns whether a preemption is pending for the current LWP (l_dopreempt
+   is an int counter; non-zero converts to true). */
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD);
+
+    return curlwp->l_dopreempt;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+    /* yes, RTThreadPreemptIsPending is reliable. */
+    return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+    /* yes, kernel preemption is possible. */
+    return true;
+}
+
+
+/* Disables preemption by bumping the LWP's nesting counter (same mechanism
+   as the kernel's kpreempt_disable).  pState is only asserted, not used;
+   the barrier keeps the increment from being reordered past later code. */
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+
+    curlwp->l_nopreempt++;
+    __insn_barrier();
+}
+
+
+/* Re-enables preemption: drop one nesting level and, when the outermost
+   level is left and a preemption was flagged meanwhile, perform it via
+   kpreempt(0).  Mirrors the kernel's kpreempt_enable pattern. */
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    __insn_barrier();
+    if (--curlwp->l_nopreempt != 0)
+        return;  /* still nested */
+    __insn_barrier();
+    if (__predict_false(curlwp->l_dopreempt))
+        kpreempt(0);
+    __insn_barrier();
+}
+
+
+/* Approximates "in interrupt context" by checking whether interrupts are
+   disabled — a placeholder, see the todo below. */
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+    /** @todo NetBSD: Implement RTThreadIsInInterrupt. Required for guest
+     *        additions! */
+    return !ASMIntAreEnabled();
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/thread2-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/thread2-r0drv-netbsd.c
new file mode 100644
index 00000000..f7faaae7
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/thread2-r0drv-netbsd.c
@@ -0,0 +1,135 @@
+/* $Id: thread2-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+
+#include "internal/thread.h"
+
+
+/* No global native-thread state to set up on NetBSD. */
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+    return VINF_SUCCESS;
+}
+
+
+/* Resolves the current native thread (LWP) to its IPRT thread handle, if it
+   is known to the IPRT thread database. */
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+    return rtThreadGetByNative(RTThreadNativeSelf());
+}
+
+
+/* Maps an IPRT thread type to a NetBSD priority and applies it.
+ *
+ * NOTE(review): this changes the priority of the *calling* LWP (curlwp) and
+ * ignores pThread — correct only when invoked from the target thread itself;
+ * confirm against the callers in internal/thread. */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+    int iPriority;
+
+    switch (enmType)
+    {
+        case RTTHREADTYPE_INFREQUENT_POLLER:    iPriority = PZERO + 8;  break;
+        case RTTHREADTYPE_EMULATION:            iPriority = PZERO + 4;  break;
+        case RTTHREADTYPE_DEFAULT:              iPriority = PZERO;      break;
+        case RTTHREADTYPE_MSG_PUMP:             iPriority = PZERO - 4;  break;
+        case RTTHREADTYPE_IO:                   iPriority = PRIBIO;     break;
+        case RTTHREADTYPE_TIMER:                iPriority = PSWP;       break;
+        default:
+            AssertMsgFailed(("enmType=%d\n", enmType));
+            return VERR_INVALID_PARAMETER;
+    }
+
+    /* lwp_changepri requires the LWP to be locked. */
+    lwp_lock(curlwp);
+    lwp_changepri(curlwp, iPriority);
+    lwp_unlock(curlwp);
+
+    return VINF_SUCCESS;
+}
+
+
+/* Adopts a non-IPRT thread into the IPRT thread database. */
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+    /* Nothing special needs doing here, but the caller had better know
+       what they are doing. */
+    return VINF_SUCCESS;
+}
+
+
+/* Crude grace period so a just-terminated native thread can finish exiting
+   before its resources are assumed gone. */
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+    /** @todo fix RTThreadWait/RTR0Term race on netbsd. */
+    RTThreadSleep(1);
+}
+
+
+/* No per-thread native data to clean up on NetBSD. */
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+}
+
+
+/**
+ * Native thread main function.
+ *
+ * Runs the generic IPRT thread body and terminates the kthread with its
+ * status.  kthread_exit() does not return.
+ *
+ * @param   pvThreadInt     The thread structure.
+ */
+static void rtThreadNativeMain(void *pvThreadInt)
+{
+    /* curlwp cast matches the handle produced by RTThreadNativeSelf(). */
+    const struct lwp *Self = curlwp;
+    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
+    int rc;
+
+    rc = rtThreadMain(pThreadInt, (RTNATIVETHREAD)Self, &pThreadInt->szName[0]);
+
+    kthread_exit(rc);
+}
+
+
+/* Spawns the native kernel thread backing an IPRT thread.  kthread_create
+   returns 0 on success or a positive errno, which is translated to an IPRT
+   status code. */
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+    int rc;
+    struct lwp *l;
+
+    rc = kthread_create(PRI_NONE, 0, NULL, rtThreadNativeMain, (void *)pThreadInt, &l, "%s", pThreadInt->szName);
+
+    if (!rc)
+    {
+        *pNativeThread = (RTNATIVETHREAD)l;
+        rc = VINF_SUCCESS;
+    }
+    else
+        rc = RTErrConvertFromErrno(rc);
+    return rc;
+}
diff --git a/src/VBox/Runtime/r0drv/netbsd/time-r0drv-netbsd.c b/src/VBox/Runtime/r0drv/netbsd/time-r0drv-netbsd.c
new file mode 100644
index 00000000..162fceb5
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/netbsd/time-r0drv-netbsd.c
@@ -0,0 +1,73 @@
+/* $Id: time-r0drv-netbsd.c $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, NetBSD.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-netbsd-kernel.h"
+#define RTTIME_INCL_TIMESPEC
+
+#include <iprt/time.h>
+
+
+/* Nanosecond timestamp.
+   NOTE(review): nanotime() is the wall clock and can jump when the system
+   time is set; IPRT's RTTimeNanoTS contract is monotonic — consider
+   nanouptime() instead, TODO confirm. */
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+    struct timespec tsp;
+    nanotime(&tsp);
+    return tsp.tv_sec * RT_NS_1SEC_64
+         + tsp.tv_nsec;
+}
+
+
+/* Millisecond timestamp derived from the nanosecond clock. */
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+    return RTTimeNanoTS() / RT_NS_1MS;
+}
+
+
+/* No distinct "system" clock in this port; same source as RTTimeNanoTS. */
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+    return RTTimeNanoTS();
+}
+
+
+/* No distinct "system" clock in this port; same source as RTTimeMilliTS. */
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+    return RTTimeMilliTS();
+}
+
+
+/* Current wall-clock time as an RTTIMESPEC; returns pTime for chaining. */
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+    struct timespec tsp;
+    nanotime(&tsp);
+    return RTTimeSpecSetTimespec(pTime, &tsp);
+}
diff --git a/src/VBox/Runtime/r0drv/nt/Makefile.kup b/src/VBox/Runtime/r0drv/nt/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp
new file mode 100644
index 00000000..fd7a674e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp
@@ -0,0 +1,39 @@
+/* $Id: RTLogWriteDebugger-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Log To Debugger, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include "the-nt-kernel.h"
+#include <iprt/log.h>
+#include <iprt/assert.h>
+
+
+/* Writes a log chunk to the NT kernel debugger.  The API contract requires
+   pch to be NUL terminated at pch[cb]; that is asserted here (note the read
+   of pch[cb] is out of bounds if the caller violates the contract), and
+   DbgPrint then prints the whole string — cb itself is not passed on. */
+RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
+{
+    if (pch[cb] != '\0')
+        AssertBreakpoint();
+    DbgPrint("%s", pch);
+    return;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/RTTimerGetSystemGranularity-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/RTTimerGetSystemGranularity-r0drv-nt.cpp
new file mode 100644
index 00000000..d1f599c6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/RTTimerGetSystemGranularity-r0drv-nt.cpp
@@ -0,0 +1,61 @@
+/* $Id: RTTimerGetSystemGranularity-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - RTTimerGetSystemGranularity, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/timer.h>
+#include <iprt/errcore.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+
+#include "internal-r0drv-nt.h"
+#include "internal/magics.h"
+
+
+/* Returns the current system timer granularity in nanoseconds. */
+RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
+{
+    /*
+     * Get the default/max timer increment value, return it if ExSetTimerResolution
+     * isn't available. According to the sysinternals guys NtQueryTimerResolution
+     * is only available in userland and they find it equally annoying.
+     */
+    ULONG ulTimeInc = KeQueryTimeIncrement();
+    if (!g_pfnrtNtExSetTimerResolution)
+        return ulTimeInc * 100; /* The value is in 100ns, the funny NT unit. */
+
+    /*
+     * Use the value returned by ExSetTimerResolution. Since the kernel is keeping
+     * count of these calls, we have to do two calls that cancel each other out.
+     */
+    g_pfnrtNtExSetTimerResolution(ulTimeInc, TRUE);
+    ULONG ulResolution = g_pfnrtNtExSetTimerResolution(0 /*ignored*/, FALSE);
+    return ulResolution * 100; /* NT -> ns */
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/alloc-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/alloc-r0drv-nt.cpp
new file mode 100644
index 00000000..ef59491f
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/alloc-r0drv-nt.cpp
@@ -0,0 +1,151 @@
+/* $Id: alloc-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include "r0drv/alloc-r0drv.h"
+#include "internal-r0drv-nt.h"
+
+
+/**
+ * OS specific allocation function.
+ *
+ * @returns VINF_SUCCESS on success, VERR_NO_MEMORY if the pool allocation
+ *          failed, VERR_NOT_SUPPORTED for RTMEMHDR_FLAG_ANY_CTX requests
+ *          (the NT pool APIs cannot serve any-context/ISR-safe allocations).
+ * @param   cb          Number of payload bytes requested (header is added on top).
+ * @param   fFlags      RTMEMHDR_FLAG_XXX allocation flags.
+ * @param   ppHdr       Where to return the allocation header on success.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+    if (!(fFlags & RTMEMHDR_FLAG_ANY_CTX))
+    {
+        PRTMEMHDR pHdr;
+        if (g_pfnrtExAllocatePoolWithTag)
+        {
+            /* Windows 8+ has a non-executable non-paged pool; prefer it unless
+               the caller explicitly asked for executable memory. */
+            if (!(fFlags & RTMEMHDR_FLAG_EXEC) && g_uRtNtVersion >= RTNT_MAKE_VERSION(8,0))
+                pHdr = (PRTMEMHDR)g_pfnrtExAllocatePoolWithTag(NonPagedPoolNx, cb + sizeof(*pHdr), IPRT_NT_POOL_TAG);
+            else
+                pHdr = (PRTMEMHDR)g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb + sizeof(*pHdr), IPRT_NT_POOL_TAG);
+        }
+        else
+            pHdr = (PRTMEMHDR)ExAllocatePool(NonPagedPool, cb + sizeof(*pHdr)); /* tagged variant not resolved (old NT) */
+        if (pHdr)
+        {
+            pHdr->u32Magic = RTMEMHDR_MAGIC;
+            pHdr->fFlags   = fFlags;
+            pHdr->cb       = (uint32_t)cb; Assert(pHdr->cb == cb); /* catch size_t -> uint32_t truncation */
+            pHdr->cbReq    = (uint32_t)cb;
+            *ppHdr = pHdr;
+            return VINF_SUCCESS;
+        }
+        return VERR_NO_MEMORY;
+    }
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * OS specific free function.
+ *
+ * @param   pHdr        Header of the allocation to free.  Never NULL.
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+    /* Invalidate the magic first so use-after-free / double-free is caught. */
+    pHdr->u32Magic += 1;
+    if (g_pfnrtExFreePoolWithTag)
+        g_pfnrtExFreePoolWithTag(pHdr, IPRT_NT_POOL_TAG);
+    else
+        ExFreePool(pHdr); /* must pair with the untagged ExAllocatePool path */
+}
+
+
+/**
+ * Allocates physical contiguous memory (below 4GB).
+ * The allocation is page aligned and its contents is undefined.
+ *
+ * @returns Pointer to the memory block on success, NULL on failure.
+ *          This is page aligned.
+ * @param   pPhys   Where to store the physical address.
+ * @param   cb      The allocation size in bytes.  This is always
+ *                  rounded up to PAGE_SIZE.
+ */
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+    /*
+     * Validate input.
+     */
+    Assert(VALID_PTR(pPhys));
+    Assert(cb > 0);
+
+    /*
+     * Allocate and get physical address.
+     * Make sure the return is page aligned.
+     */
+    PHYSICAL_ADDRESS MaxPhysAddr;
+    MaxPhysAddr.HighPart = 0;
+    MaxPhysAddr.LowPart = 0xffffffff; /* constrain the allocation to below 4GB */
+    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
+    void *pv = MmAllocateContiguousMemory(cb, MaxPhysAddr);
+    if (pv)
+    {
+        if (!((uintptr_t)pv & PAGE_OFFSET_MASK))    /* paranoia */
+        {
+            PHYSICAL_ADDRESS PhysAddr = MmGetPhysicalAddress(pv);
+            if (!PhysAddr.HighPart)                 /* paranoia */
+            {
+                *pPhys = (RTCCPHYS)PhysAddr.LowPart;
+                return pv;
+            }
+
+            /* failure */
+            AssertMsgFailed(("MmAllocateContiguousMemory returned high address! PhysAddr=%RX64\n", (uint64_t)PhysAddr.QuadPart));
+        }
+        else
+            AssertMsgFailed(("MmAllocateContiguousMemory didn't return a page aligned address - %p!\n", pv));
+
+        MmFreeContiguousMemory(pv);
+    }
+
+    return NULL;
+}
+
+
+/**
+ * Frees memory allocated using RTMemContAlloc().
+ *
+ * @param   pv      Pointer returned by RTMemContAlloc().  NULL is ignored.
+ * @param   cb      The cb parameter passed to RTMemContAlloc().  Only used
+ *                  for sanity checking here; NT frees by pointer alone.
+ */
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+    if (pv)
+    {
+        Assert(cb > 0); NOREF(cb);
+        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
+        MmFreeContiguousMemory(pv);
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/assert-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/assert-r0drv-nt.cpp
new file mode 100644
index 00000000..07101b3e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/assert-r0drv-nt.cpp
@@ -0,0 +1,66 @@
+/* $Id: assert-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/string.h>
+#include <iprt/stdarg.h>
+
+#include "internal/assert.h"
+
+
+/**
+ * Outputs the first (fixed) part of an assertion message via DbgPrint.
+ *
+ * @param   pszExpr         The failed expression text.
+ * @param   uLine           The source line number.
+ * @param   pszFile         The source file name.
+ * @param   pszFunction     The function name.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+    DbgPrint("\n!!Assertion Failed!!\n"
+             "Expression: %s\n"
+             "Location  : %s(%d) %s\n",
+             pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+/**
+ * Outputs the second (custom, printf-style) part of an assertion message.
+ *
+ * @param   fInitial    Whether this is the initial call for this assertion
+ *                      (unused on NT - everything goes to DbgPrint anyway).
+ * @param   pszFormat   Format string for the custom message.
+ * @param   va          Format arguments.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+    char szMsg[256];
+
+    /* Format into a bounded local buffer; explicitly terminate for safety. */
+    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
+    szMsg[sizeof(szMsg) - 1] = '\0';
+    DbgPrint("%s", szMsg);
+
+    NOREF(fInitial);
+}
+
+
+/**
+ * Panics the system on assertion failure.  Currently a no-op stub on NT.
+ */
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+    /** @todo implement RTR0AssertPanicSystem. */
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/dbgkrnlinfo-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/dbgkrnlinfo-r0drv-nt.cpp
new file mode 100644
index 00000000..6aa64c87
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/dbgkrnlinfo-r0drv-nt.cpp
@@ -0,0 +1,761 @@
+/* $Id: dbgkrnlinfo-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Kernel Debug Information, R0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define IMAGE_NT_HEADERS NT_IMAGE_NT_HEADERS
+#define IMAGE_NT_HEADERS32 NT_IMAGE_NT_HEADERS32
+#define IMAGE_NT_HEADERS64 NT_IMAGE_NT_HEADERS64
+#define PIMAGE_NT_HEADERS NT_PIMAGE_NT_HEADERS
+#define PIMAGE_NT_HEADERS32 NT_PIMAGE_NT_HEADERS32
+#define PIMAGE_NT_HEADERS64 NT_PIMAGE_NT_HEADERS64
+#define IPRT_NT_MAP_TO_ZW
+#include "the-nt-kernel.h"
+#include <iprt/dbg.h>
+
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/utf16.h>
+#include "internal-r0drv-nt.h"
+#include "internal/magics.h"
+
+#undef IMAGE_NT_HEADERS
+#undef IMAGE_NT_HEADERS32
+#undef IMAGE_NT_HEADERS64
+#undef PIMAGE_NT_HEADERS
+#undef PIMAGE_NT_HEADERS32
+#undef PIMAGE_NT_HEADERS64
+#include <iprt/formats/pecoff.h>
+#include <iprt/formats/mz.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Private logging macro, will use DbgPrint! */
+#ifdef IN_GUEST
+# define RTR0DBG_NT_ERROR_LOG(a) do { RTLogBackdoorPrintf a; DbgPrint a; } while (0)
+# define RTR0DBG_NT_DEBUG_LOG(a) do { RTLogBackdoorPrintf a; DbgPrint a; } while (0)
+#else
+# define RTR0DBG_NT_ERROR_LOG(a) do { DbgPrint a; } while (0)
+# define RTR0DBG_NT_DEBUG_LOG(a) do { DbgPrint a; } while (0)
+#endif
+#ifndef LOG_ENABLED
+# undef RTR0DBG_NT_DEBUG_LOG
+# define RTR0DBG_NT_DEBUG_LOG(a) do { } while (0)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+#define PIMAGE_NT_HEADERS RT_CONCAT(PIMAGE_NT_HEADERS, ARCH_BITS)
+
+/**
+ * Information we cache for a kernel module.
+ */
+typedef struct RTDBGNTKRNLMODINFO
+{
+ /** The module name. */
+ char szName[32];
+
+ /** The image base. */
+ uint8_t const *pbImageBase;
+ /** The NT headers. */
+ PIMAGE_NT_HEADERS pNtHdrs;
+ /** Set if this module parsed okay and all fields are valid. */
+ bool fOkay;
+ /** The NT header offset/RVA. */
+ uint32_t offNtHdrs;
+ /** The end of the section headers. */
+ uint32_t offEndSectHdrs;
+ /** The end of the image. */
+ uint32_t cbImage;
+ /** Offset of the export directory. */
+ uint32_t offExportDir;
+ /** Size of the export directory. */
+ uint32_t cbExportDir;
+
+ /** Exported functions and data by ordinal (RVAs). */
+ uint32_t const *paoffExports;
+ /** The number of exports. */
+ uint32_t cExports;
+ /** The number of exported names. */
+ uint32_t cNamedExports;
+ /** Pointer to the array of exported names (RVAs to strings). */
+ uint32_t const *paoffNamedExports;
+ /** Array parallel to paoffNamedExports with the corresponding ordinals
+ * (indexes into paoffExports). */
+ uint16_t const *pau16NameOrdinals;
+} RTDBGNTKRNLMODINFO;
+/** Pointer to kernel module info. */
+typedef RTDBGNTKRNLMODINFO *PRTDBGNTKRNLMODINFO;
+/** Pointer to const kernel module info. */
+typedef RTDBGNTKRNLMODINFO const *PCRTDBGNTKRNLMODINFO;
+
+
+/**
+ * NT kernel info instance.
+ */
+typedef struct RTDBGKRNLINFOINT
+{
+ /** Magic value (RTDBGKRNLINFO_MAGIC). */
+ uint32_t u32Magic;
+ /** Reference counter. */
+ uint32_t volatile cRefs;
+ /** Number of additional modules in the cache. */
+ uint32_t cModules;
+ /** Additional modules. */
+ RTDBGNTKRNLMODINFO aModules[3];
+} RTDBGKRNLINFOINT;
+
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Pointer to MmGetSystemRoutineAddress.
+ * @note Added in NT v5.0. */
+static decltype(MmGetSystemRoutineAddress) *g_pfnMmGetSystemRoutineAddress = NULL;
+/** Info about the ntoskrnl.exe mapping. */
+static RTDBGNTKRNLMODINFO g_NtOsKrnlInfo = { "ntoskrnl.exe", NULL, NULL, false, 0, 0, 0, 0, 0, NULL, 0, 0, NULL, NULL };
+/** Info about the hal.dll mapping. */
+static RTDBGNTKRNLMODINFO g_HalInfo = { "hal.dll", NULL, NULL, false, 0, 0, 0, 0, 0, NULL, 0, 0, NULL, NULL };
+
+
+
+/**
+ * Looks up a symbol in the export table.
+ *
+ * @returns VINF_SUCCESS or VERR_SYMBOL_NOT_FOUND.
+ * @param   pModInfo            The module info.
+ * @param   pszSymbol           The symbol to find.
+ * @param   ppvSymbol           Where to put the symbol address.
+ *
+ * @note    Support library has similar code for in the importless area.
+ */
+static int rtR0DbgKrnlInfoLookupSymbol(PCRTDBGNTKRNLMODINFO pModInfo, const char *pszSymbol, void **ppvSymbol)
+{
+    if (pModInfo->fOkay)
+    {
+        /*
+         * Binary search over the (sorted) named export table.  Wrapped in SEH
+         * since we're reading a mapping controlled by the loader.
+         */
+        __try
+        {
+            uint32_t iStart = 0;
+            uint32_t iEnd   = pModInfo->cNamedExports;
+            while (iStart < iEnd)
+            {
+                uint32_t iCur       = iStart + (iEnd - iStart) / 2;
+                uint32_t offExpName = pModInfo->paoffNamedExports[iCur];
+                if (offExpName >= pModInfo->offEndSectHdrs && offExpName < pModInfo->cbImage)
+                { /* likely */ }
+                else
+                {
+                    RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlInfoLookupSymbol: %s: Bad export name entry: %#x (iCur=%#x)\n",
+                                          pModInfo->szName, offExpName, iCur));
+                    break;
+                }
+
+                const char *pszExpName = (const char *)&pModInfo->pbImageBase[offExpName];
+                int iDiff = strcmp(pszExpName, pszSymbol);
+                if (iDiff > 0)      /* pszExpName > pszSymbol: search chunk before i */
+                    iEnd = iCur;
+                else if (iDiff < 0) /* pszExpName < pszSymbol: search chunk after i */
+                    iStart = iCur + 1;
+                else                /* pszExpName == pszSymbol */
+                {
+                    uint16_t iExpOrdinal = pModInfo->pau16NameOrdinals[iCur];
+                    if (iExpOrdinal < pModInfo->cExports)
+                    {
+                        uint32_t offExport = pModInfo->paoffExports[iExpOrdinal];
+
+                        /* An RVA inside the export directory marks a forwarder entry, which
+                           we cannot resolve; anything outside it is a real symbol address. */
+                        if (offExport - pModInfo->offExportDir >= pModInfo->cbExportDir)
+                        {
+                            *ppvSymbol = (void *)&pModInfo->pbImageBase[offExport];
+                            return VINF_SUCCESS;
+                        }
+
+                        RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlInfoLookupSymbol: %s: Forwarded symbol '%s': offExport=%#x (dir %#x LB %#x)\n",
+                                              pModInfo->szName, pszSymbol, offExport, pModInfo->offExportDir, pModInfo->cbExportDir));
+                    }
+                    else
+                        RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlInfoLookupSymbol: %s: Name ordinal for '%s' is out of bounds: %#x (max %#x)\n",
+                                              pModInfo->szName, pszSymbol, iExpOrdinal, pModInfo->cExports));
+                    break;
+                }
+            }
+        }
+        __except(EXCEPTION_EXECUTE_HANDLER)
+        {
+            RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlInfoLookupSymbol: Exception searching '%s' for '%s'...\n",
+                                  pModInfo->szName, pszSymbol));
+        }
+    }
+
+    *ppvSymbol = NULL;
+    return VERR_SYMBOL_NOT_FOUND;
+}
+
+
+/**
+ * Parses (PE) module headers and fills in the corresponding module info struct.
+ *
+ * @returns true on if success, false if not.
+ * @param   pModInfo            The module info structure to fill in with parsed
+ *                              data.  The szName and fOkay are set by the
+ *                              caller, this function does the rest.
+ * @param   pbMapping           The image mapping address
+ * @param   cbMapping           The image mapping size.
+ *
+ * @note    Support library has similar code for in the importless area.
+ */
+static bool rtR0DbgKrnlNtParseModule(PRTDBGNTKRNLMODINFO pModInfo, uint8_t const *pbMapping, size_t cbMapping)
+{
+/* Logs the failure reason and bails out of the function. */
+#define MODERR_RETURN(a_LogMsg, ...) \
+        do { RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtParseModule: " a_LogMsg, __VA_ARGS__)); return false; } while (0)
+
+    pModInfo->pbImageBase = pbMapping;
+
+    /*
+     * Locate the PE header, do some basic validations.
+     * If there is no MZ stub, the NT headers are assumed to start at offset 0.
+     */
+    IMAGE_DOS_HEADER const *pMzHdr = (IMAGE_DOS_HEADER const *)pbMapping;
+    uint32_t        offNtHdrs = 0;
+    PIMAGE_NT_HEADERS pNtHdrs;
+    if (pMzHdr->e_magic == IMAGE_DOS_SIGNATURE)
+    {
+        offNtHdrs = pMzHdr->e_lfanew;
+        if (offNtHdrs > _2K)
+            MODERR_RETURN("%s: e_lfanew=%#x, expected a lower value\n", pModInfo->szName, offNtHdrs);
+    }
+    pModInfo->pNtHdrs = pNtHdrs = (PIMAGE_NT_HEADERS)&pbMapping[offNtHdrs];
+
+    if (pNtHdrs->Signature != IMAGE_NT_SIGNATURE)
+        MODERR_RETURN("%s: Invalid PE signature: %#x", pModInfo->szName, pNtHdrs->Signature);
+    if (pNtHdrs->FileHeader.SizeOfOptionalHeader != sizeof(pNtHdrs->OptionalHeader))
+        MODERR_RETURN("%s: Unexpected optional header size: %#x\n", pModInfo->szName, pNtHdrs->FileHeader.SizeOfOptionalHeader);
+    if (pNtHdrs->OptionalHeader.Magic != RT_CONCAT3(IMAGE_NT_OPTIONAL_HDR,ARCH_BITS,_MAGIC))
+        MODERR_RETURN("%s: Unexpected optional header magic: %#x\n", pModInfo->szName, pNtHdrs->OptionalHeader.Magic);
+    if (pNtHdrs->OptionalHeader.NumberOfRvaAndSizes != IMAGE_NUMBEROF_DIRECTORY_ENTRIES)
+        MODERR_RETURN("%s: Unexpected number of RVA and sizes: %#x\n", pModInfo->szName, pNtHdrs->OptionalHeader.NumberOfRvaAndSizes);
+
+    pModInfo->offNtHdrs      = offNtHdrs;
+    pModInfo->offEndSectHdrs = offNtHdrs
+                             + sizeof(*pNtHdrs)
+                             + pNtHdrs->FileHeader.NumberOfSections * sizeof(IMAGE_SECTION_HEADER);
+    pModInfo->cbImage        = pNtHdrs->OptionalHeader.SizeOfImage;
+    if (pModInfo->cbImage > cbMapping)
+        MODERR_RETURN("%s: The image size %#x is larger than the mapping: %#x\n",
+                      pModInfo->szName, pModInfo->cbImage, cbMapping);
+
+    /*
+     * Find the export directory.  All RVAs below are validated to lie after
+     * the section headers and inside the declared image size before use.
+     */
+    IMAGE_DATA_DIRECTORY ExpDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
+    if (   ExpDir.Size < sizeof(IMAGE_EXPORT_DIRECTORY)
+        || ExpDir.VirtualAddress < pModInfo->offEndSectHdrs
+        || ExpDir.VirtualAddress >= pModInfo->cbImage
+        || ExpDir.VirtualAddress + ExpDir.Size > pModInfo->cbImage)
+        MODERR_RETURN("%s: Missing or invalid export directory: %#lx LB %#x\n", pModInfo->szName, ExpDir.VirtualAddress, ExpDir.Size);
+    pModInfo->offExportDir = ExpDir.VirtualAddress;
+    pModInfo->cbExportDir  = ExpDir.Size;
+
+    IMAGE_EXPORT_DIRECTORY const *pExpDir = (IMAGE_EXPORT_DIRECTORY const *)&pbMapping[ExpDir.VirtualAddress];
+
+    /* Sanity-cap the export counts (1.._1M) to keep the bounds math below safe. */
+    if (   pExpDir->NumberOfFunctions >= _1M
+        || pExpDir->NumberOfFunctions <  1
+        || pExpDir->NumberOfNames     >= _1M
+        || pExpDir->NumberOfNames     <  1)
+        MODERR_RETURN("%s: NumberOfNames or/and NumberOfFunctions are outside the expected range: nof=%#x non=%#x\n",
+                      pModInfo->szName, pExpDir->NumberOfFunctions, pExpDir->NumberOfNames);
+    pModInfo->cNamedExports = pExpDir->NumberOfNames;
+    pModInfo->cExports      = RT_MAX(pExpDir->NumberOfNames, pExpDir->NumberOfFunctions);
+
+    if (   pExpDir->AddressOfFunctions < pModInfo->offEndSectHdrs
+        || pExpDir->AddressOfFunctions >= pModInfo->cbImage
+        || pExpDir->AddressOfFunctions + pModInfo->cExports * sizeof(uint32_t) > pModInfo->cbImage)
+           MODERR_RETURN("%s: Bad AddressOfFunctions: %#x\n", pModInfo->szName, pExpDir->AddressOfFunctions);
+    pModInfo->paoffExports = (uint32_t const *)&pbMapping[pExpDir->AddressOfFunctions];
+
+    if (   pExpDir->AddressOfNames < pModInfo->offEndSectHdrs
+        || pExpDir->AddressOfNames >= pModInfo->cbImage
+        || pExpDir->AddressOfNames + pExpDir->NumberOfNames * sizeof(uint32_t) > pModInfo->cbImage)
+           MODERR_RETURN("%s: Bad AddressOfNames: %#x\n", pModInfo->szName, pExpDir->AddressOfNames);
+    pModInfo->paoffNamedExports = (uint32_t const *)&pbMapping[pExpDir->AddressOfNames];
+
+    /* NOTE(review): the ordinal table holds uint16_t entries, so sizeof(uint32_t)
+       here makes the bound check stricter than strictly required - confirm intent. */
+    if (   pExpDir->AddressOfNameOrdinals < pModInfo->offEndSectHdrs
+        || pExpDir->AddressOfNameOrdinals >= pModInfo->cbImage
+        || pExpDir->AddressOfNameOrdinals + pExpDir->NumberOfNames * sizeof(uint32_t) > pModInfo->cbImage)
+           MODERR_RETURN("%s: Bad AddressOfNameOrdinals: %#x\n", pModInfo->szName, pExpDir->AddressOfNameOrdinals);
+    pModInfo->pau16NameOrdinals = (uint16_t const *)&pbMapping[pExpDir->AddressOfNameOrdinals];
+
+    /*
+     * Success.
+     */
+    return true;
+#undef MODERR_RETURN
+}
+
+
+/**
+ * Searches the module information from the kernel for the NT kernel module, the
+ * HAL module, and optionally one more module.
+ *
+ * If the NT kernel or HAL modules have already been found, they'll be skipped.
+ *
+ * @returns IPRT status code.
+ * @retval  VERR_LDR_GENERAL_FAILURE if we failed to parse the NT kernel or HAL.
+ * @retval  VERR_BAD_EXE_FORMAT if we failed to parse @a pModInfo.
+ * @retval  VERR_MODULE_NOT_FOUND if @a pModInfo wasn't found.
+ * @retval  VERR_BUFFER_UNDERFLOW if less than two modules was returned by the
+ *          system.
+ *
+ * @param   pModInfo            Custom module to search for.  Optional.
+ */
+static int rtR0DbgKrnlNtInit(PRTDBGNTKRNLMODINFO pModInfo)
+{
+    RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: pModInfo=%p\n", pModInfo));
+
+#ifndef IPRT_TARGET_NT4
+    /*
+     * Must manually initialize g_pfnMmGetSystemRoutineAddress, otherwise compiler
+     * generates its own dynamic init code that might not necessarily be called.
+     */
+    g_pfnMmGetSystemRoutineAddress = MmGetSystemRoutineAddress;
+#endif
+
+    /*
+     * Allocate a reasonably large buffer and get the information we need.  We don't
+     * need everything since the result starts off with the kernel bits in load order.
+     *
+     * Note! ZwQuerySystemInformation requires NT4.  For 3.51 we could possibly emit
+     *       the syscall ourselves, if we cared.
+     */
+    uint32_t             cModules = pModInfo ? 110 /*32KB*/ : 27 /*8KB*/;
+    ULONG                cbInfo   = RT_UOFFSETOF_DYN(RTL_PROCESS_MODULES, Modules[cModules]);
+    PRTL_PROCESS_MODULES pInfo    = (PRTL_PROCESS_MODULES)RTMemAllocZ(cbInfo);
+    if (!pInfo)
+    {
+        /* Retry with a quarter of the size before giving up. */
+        cModules = cModules / 4;
+        cbInfo   = RT_UOFFSETOF_DYN(RTL_PROCESS_MODULES, Modules[cModules]);
+        pInfo    = (PRTL_PROCESS_MODULES)RTMemAllocZ(cbInfo);
+        if (!pInfo)
+        {
+            RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: Out of memory!\n"));
+            return VERR_NO_MEMORY;
+        }
+    }
+
+    int      rc;
+    ULONG    cbActual = 0;
+    NTSTATUS rcNt = ZwQuerySystemInformation(SystemModuleInformation, pInfo, cbInfo, &cbActual);
+    RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: ZwQuerySystemInformation returned %#x and NumberOfModules=%#x\n",
+                          rcNt, pInfo->NumberOfModules));
+    if (   NT_SUCCESS(rcNt)
+        || rcNt == STATUS_INFO_LENGTH_MISMATCH) /* partial data is fine; kernel bits come first */
+        rc = VINF_SUCCESS;
+    else
+    {
+        RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: ZwQuerySystemInformation failed: %#x\n", rcNt));
+        rc = RTErrConvertFromNtStatus(rcNt);
+    }
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Search the info.  The information is ordered with the kernel bits first,
+         * we expect at least two modules to be returned to us (kernel + hal)!
+         */
+#if ARCH_BITS == 32
+        uintptr_t const uMinKernelAddr = _2G; /** @todo resolve MmSystemRangeStart */
+#else
+        uintptr_t const uMinKernelAddr = (uintptr_t)MM_SYSTEM_RANGE_START;
+#endif
+        if (pInfo->NumberOfModules < cModules)
+            cModules = pInfo->NumberOfModules;
+        if (cModules < 2)
+        {
+            RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: Error! Only %u module(s) returned!\n", cModules));
+            rc = VERR_BUFFER_UNDERFLOW;
+        }
+        for (uint32_t iModule = 0; iModule < cModules; iModule++)
+            RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: [%u]= %p LB %#x %s\n", iModule, pInfo->Modules[iModule].ImageBase,
+                                  pInfo->Modules[iModule].ImageSize, pInfo->Modules[iModule].FullPathName));
+
+        /*
+         * First time around we search for the NT kernel and HAL.  We'll look for NT
+         * kernel and HAL in the first 16 entries, and if not found, use the first
+         * and second entry respectively.
+         */
+        if (   RT_SUCCESS(rc)
+            && !g_NtOsKrnlInfo.pbImageBase
+            && !g_HalInfo.pbImageBase)
+        {
+            /* Find them. */
+            RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: Looking for kernel and hal...\n"));
+            uint32_t const cMaxModules = RT_MIN(cModules, 16);
+            uint32_t       idxNtOsKrnl = UINT32_MAX;
+            uint32_t       idxHal      = UINT32_MAX;
+            for (uint32_t iModule = 0; iModule < cMaxModules; iModule++)
+            {
+                RTL_PROCESS_MODULE_INFORMATION const * const pModule = &pInfo->Modules[iModule];
+                if (   (uintptr_t)pModule->ImageBase >= uMinKernelAddr
+                    && (uintptr_t)pModule->ImageSize >= _4K)
+                {
+                    const char *pszName = (const char *)&pModule->FullPathName[pModule->OffsetToFileName];
+                    if (   idxNtOsKrnl == UINT32_MAX
+                        && RTStrICmpAscii(pszName, g_NtOsKrnlInfo.szName) == 0)
+                    {
+                        idxNtOsKrnl = iModule;
+                        if (idxHal != UINT32_MAX)
+                            break; /* both found */
+                    }
+                    else if (   idxHal == UINT32_MAX
+                             && RTStrICmpAscii(pszName, g_HalInfo.szName) == 0)
+                    {
+                        idxHal = iModule;
+                        if (idxNtOsKrnl != UINT32_MAX) /* only stop once the kernel is also located */
+                            break;
+                    }
+                }
+            }
+            RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: idxNtOsKrnl=%#x idxHal=%#x\n", idxNtOsKrnl, idxHal));
+            if (idxNtOsKrnl == UINT32_MAX)
+            {
+                idxNtOsKrnl = 0;
+                RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: 'ntoskrnl.exe' not found, picking '%s' instead\n",
+                                      pInfo->Modules[idxNtOsKrnl].FullPathName));
+            }
+            if (idxHal == UINT32_MAX)
+            {
+                idxHal = 1;
+                RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: 'hal.dll' not found, picking '%s' instead\n",
+                                      pInfo->Modules[idxHal].FullPathName));
+            }
+
+            /* Parse them. */
+            //RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: Parsing NT kernel...\n"));
+            __try
+            {
+                g_NtOsKrnlInfo.fOkay = rtR0DbgKrnlNtParseModule(&g_NtOsKrnlInfo,
+                                                               (uint8_t const *)pInfo->Modules[idxNtOsKrnl].ImageBase,
+                                                               pInfo->Modules[idxNtOsKrnl].ImageSize);
+            }
+            __except(EXCEPTION_EXECUTE_HANDLER)
+            {
+                g_NtOsKrnlInfo.fOkay = false;
+                RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: Exception in rtR0DbgKrnlNtParseModule parsing ntoskrnl.exe...\n"));
+            }
+
+            //RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: Parsing HAL...\n"));
+            __try
+            {
+                g_HalInfo.fOkay = rtR0DbgKrnlNtParseModule(&g_HalInfo, (uint8_t const *)pInfo->Modules[idxHal].ImageBase,
+                                                           pInfo->Modules[idxHal].ImageSize);
+            }
+            __except(EXCEPTION_EXECUTE_HANDLER)
+            {
+                g_HalInfo.fOkay = false;
+                RTR0DBG_NT_ERROR_LOG(("rtR0DbgKrnlNtInit: Exception in rtR0DbgKrnlNtParseModule parsing hal.dll...\n"));
+            }
+            if (!g_NtOsKrnlInfo.fOkay || !g_HalInfo.fOkay)
+                rc = VERR_LDR_GENERAL_FAILURE;
+
+            /*
+             * Resolve symbols we may need in the NT kernel (provided it parsed successfully)
+             */
+            if (g_NtOsKrnlInfo.fOkay)
+            {
+                if (!g_pfnMmGetSystemRoutineAddress)
+                {
+                    //RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: Looking up 'MmGetSystemRoutineAddress'...\n"));
+                    rtR0DbgKrnlInfoLookupSymbol(&g_NtOsKrnlInfo, "MmGetSystemRoutineAddress", (void **)&g_pfnMmGetSystemRoutineAddress);
+                }
+            }
+        }
+
+        /*
+         * If we're still good, search for the given module (optional).
+         */
+        if (RT_SUCCESS(rc) && pModInfo)
+        {
+            RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: Locating module '%s'...\n", pModInfo->szName));
+            rc = VERR_MODULE_NOT_FOUND;
+            for (uint32_t iModule = 0; iModule < cModules; iModule++)
+            {
+                RTL_PROCESS_MODULE_INFORMATION const * const pModule = &pInfo->Modules[iModule];
+                if (   (uintptr_t)pModule->ImageBase >= uMinKernelAddr
+                    && (uintptr_t)pModule->ImageSize >= _4K)
+                {
+                    const char *pszName = (const char *)&pModule->FullPathName[pModule->OffsetToFileName];
+                    if (   pModInfo->pbImageBase == NULL
+                        && RTStrICmpAscii(pszName, pModInfo->szName) == 0)
+                    {
+                        /*
+                         * Found the module, try parse it.
+                         */
+                        __try
+                        {
+                            pModInfo->fOkay = rtR0DbgKrnlNtParseModule(pModInfo, (uint8_t const *)pModule->ImageBase,
+                                                                       pModule->ImageSize);
+                            rc = VINF_SUCCESS;
+                        }
+                        __except(EXCEPTION_EXECUTE_HANDLER)
+                        {
+                            pModInfo->fOkay = false;
+                            rc = VERR_BAD_EXE_FORMAT;
+                        }
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    RTR0DBG_NT_DEBUG_LOG(("rtR0DbgKrnlNtInit: returns %d\n", rc));
+    RTMemFree(pInfo);
+    return rc;
+}
+
+
+
+/**
+ * Opens a kernel debug info handle.
+ *
+ * @returns VINF_SUCCESS on success, VERR_INVALID_FLAGS or VERR_NO_MEMORY on failure.
+ * @param   phKrnlInfo      Where to return the handle.
+ * @param   fFlags          Flags, must be zero.
+ */
+RTR0DECL(int) RTR0DbgKrnlInfoOpen(PRTDBGKRNLINFO phKrnlInfo, uint32_t fFlags)
+{
+    AssertReturn(!fFlags, VERR_INVALID_FLAGS);
+
+    RTDBGKRNLINFOINT *pThis = (RTDBGKRNLINFOINT *)RTMemAllocZ(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    /* Initialize the zero-filled instance and hand it out. */
+    pThis->u32Magic = RTDBGKRNLINFO_MAGIC;
+    pThis->cRefs    = 1;
+    *phKrnlInfo     = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Retains a reference to the kernel debug info handle.
+ *
+ * @returns New reference count, UINT32_MAX on invalid handle.
+ * @param   hKrnlInfo   The handle to retain.
+ */
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRetain(RTDBGKRNLINFO hKrnlInfo)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, UINT32_MAX);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+
+    uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+    Assert(cRefs && cRefs < 100000); /* sanity: catch wrap-around / wild counts */
+    return cRefs;
+}
+
+
+/**
+ * Destroys a kernel debug info instance when the last reference is released.
+ *
+ * @param   pThis   The instance to destroy.
+ */
+static void rtR0DbgKrnlNtDtor(RTDBGKRNLINFOINT *pThis)
+{
+    /* Invalidate the magic before freeing to catch stale handle use. */
+    pThis->u32Magic = ~RTDBGKRNLINFO_MAGIC;
+    RTMemFree(pThis);
+}
+
+
+/**
+ * Releases a reference to the kernel debug info handle, destroying it when the
+ * count reaches zero.
+ *
+ * @returns New reference count, 0 for NIL handle, UINT32_MAX on invalid handle.
+ * @param   hKrnlInfo   The handle to release.  NIL is quietly ignored.
+ */
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRelease(RTDBGKRNLINFO hKrnlInfo)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    if (pThis == NIL_RTDBGKRNLINFO)
+        return 0;
+    AssertPtrReturn(pThis, UINT32_MAX);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+
+    uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+    if (cRefs == 0)
+        rtR0DbgKrnlNtDtor(pThis);
+    return cRefs;
+}
+
+
+/**
+ * Queries the offset of a structure member.  Not implemented on NT - this
+ * stub only validates its inputs and always returns VERR_NOT_FOUND.
+ *
+ * @returns VERR_NOT_FOUND (or VERR_INVALID_HANDLE/VERR_INVALID_POINTER).
+ */
+RTR0DECL(int) RTR0DbgKrnlInfoQueryMember(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszStructure,
+                                         const char *pszMember, size_t *poffMember)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertPtrReturn(pszMember, VERR_INVALID_POINTER);
+    AssertPtrNullReturn(pszModule, VERR_INVALID_POINTER);
+    AssertPtrReturn(pszStructure, VERR_INVALID_POINTER);
+    AssertPtrReturn(poffMember, VERR_INVALID_POINTER);
+    return VERR_NOT_FOUND;
+}
+
+
+/**
+ * Queries the address of a kernel symbol.
+ *
+ * @returns IPRT status code (VINF_SUCCESS, VERR_SYMBOL_NOT_FOUND,
+ *          VERR_MODULE_NOT_FOUND, VERR_FILENAME_TOO_LONG, ...).
+ * @param   hKrnlInfo   The kernel info handle.
+ * @param   pszModule   The module to search; NULL means ntoskrnl + hal with
+ *                      MmGetSystemRoutineAddress as fallback.
+ * @param   pszSymbol   The symbol name.
+ * @param   ppvSymbol   Where to return the address.  Optional.
+ */
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySymbol(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszSymbol, void **ppvSymbol)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertPtrReturn(pszSymbol, VERR_INVALID_PARAMETER);
+    AssertPtrNullReturn(ppvSymbol, VERR_INVALID_PARAMETER);
+
+    RTR0DBG_NT_DEBUG_LOG(("RTR0DbgKrnlInfoQuerySymbol: pszModule=%s pszSymbol=%s\n", pszModule ? pszModule : "<null>", pszSymbol));
+
+    /* The lookup helpers always write the result, so substitute a dummy when
+       the caller didn't ask for the address. */
+    void *pvTmpSymbol = NULL;
+    if (!ppvSymbol)
+        ppvSymbol = &pvTmpSymbol;
+
+    int rc;
+    if (!pszModule)
+    {
+        /*
+         * Search both ntoskrnl and hal, may use MmGetSystemRoutineAddress as fallback.
+         * Note! MmGetSystemRoutineAddress was buggy before XP SP2 according to Geoff Chappell.
+         */
+        if (g_NtOsKrnlInfo.pbImageBase)
+            rc = VINF_SUCCESS;
+        else
+            rc = rtR0DbgKrnlNtInit(NULL);
+        if (RT_SUCCESS(rc))
+        {
+            Assert(g_NtOsKrnlInfo.fOkay);
+            Assert(g_HalInfo.fOkay);
+            //RTR0DBG_NT_DEBUG_LOG(("RTR0DbgKrnlInfoQuerySymbol: Calling RTR0DbgKrnlInfoQuerySymbol on NT kernel...\n"));
+            rc = rtR0DbgKrnlInfoLookupSymbol(&g_NtOsKrnlInfo, pszSymbol, ppvSymbol);
+            if (RT_FAILURE(rc))
+            {
+                //RTR0DBG_NT_DEBUG_LOG(("RTR0DbgKrnlInfoQuerySymbol: Calling RTR0DbgKrnlInfoQuerySymbol on HAL kernel...\n"));
+                rc = rtR0DbgKrnlInfoLookupSymbol(&g_HalInfo, pszSymbol, ppvSymbol);
+            }
+            RTR0DBG_NT_DEBUG_LOG(("RTR0DbgKrnlInfoQuerySymbol: #1 returns %d *ppvSymbol=%p\n", rc, *ppvSymbol));
+        }
+        else
+        {
+            /* Init failed. Try resolve symbol, but preserve the status code up to a point. */
+            int rc2 = VERR_SYMBOL_NOT_FOUND;
+            if (g_NtOsKrnlInfo.fOkay)
+                rc2 = rtR0DbgKrnlInfoLookupSymbol(&g_NtOsKrnlInfo, pszSymbol, ppvSymbol);
+            if (g_HalInfo.fOkay && rc2 == VERR_SYMBOL_NOT_FOUND)
+                rc2 = rtR0DbgKrnlInfoLookupSymbol(&g_HalInfo, pszSymbol, ppvSymbol);
+            if (   rc2 == VERR_SYMBOL_NOT_FOUND
+                && g_pfnMmGetSystemRoutineAddress)
+            {
+                /* We'll overwrite init failure status code here since
+                   MmGetSystemRoutineAddress will do the job for us. */
+                size_t   cwcSymbol;
+                PRTUTF16 pwszSymbol = NULL;
+                rc = RTStrToUtf16Ex(pszSymbol, RTSTR_MAX, &pwszSymbol, 0, &cwcSymbol);
+                if (RT_SUCCESS(rc))
+                {
+                    UNICODE_STRING UniStr;
+                    UniStr.Buffer        = pwszSymbol;
+                    UniStr.Length        = (uint16_t)(cwcSymbol * sizeof(RTUTF16));
+                    UniStr.MaximumLength = UniStr.Length + sizeof(RTUTF16);
+                    *ppvSymbol = g_pfnMmGetSystemRoutineAddress(&UniStr);
+                    if (*ppvSymbol)
+                        rc = VINF_SUCCESS;
+                    else
+                        rc = VERR_SYMBOL_NOT_FOUND;
+                    RTUtf16Free(pwszSymbol);
+                    RTR0DBG_NT_DEBUG_LOG(("RTR0DbgKrnlInfoQuerySymbol: #2 returns %d *ppvSymbol=%p\n", rc, *ppvSymbol));
+                }
+            }
+        }
+    }
+    else
+    {
+        /*
+         * Search specified module.
+         */
+        rc = VERR_MODULE_NOT_FOUND;
+        PRTDBGNTKRNLMODINFO pModInfo;
+        if (RTStrICmpAscii(pszModule, g_NtOsKrnlInfo.szName) == 0)
+            pModInfo = &g_NtOsKrnlInfo;
+        else if (RTStrICmpAscii(pszModule, g_HalInfo.szName) == 0)
+            pModInfo = &g_HalInfo;
+        else
+        {
+            pModInfo = NULL;
+            for (unsigned i = 0; i < pThis->cModules; i++)
+                if (RTStrICmpAscii(pszModule, pThis->aModules[i].szName) == 0)
+                {
+                    pModInfo = &pThis->aModules[i];
+                    break;
+                }
+            if (!pModInfo)
+            {
+                /*
+                 * Not found, try load it.  If module table is full, drop the first
+                 * entry and shuffle the other up to make space.
+                 */
+                size_t const        cchModule = strlen(pszModule);
+                RTDBGNTKRNLMODINFO  NewModInfo;
+                if (cchModule < sizeof(NewModInfo.szName))
+                {
+                    RT_ZERO(NewModInfo);
+                    memcpy(NewModInfo.szName, pszModule, cchModule);
+                    NewModInfo.szName[cchModule] = '\0';
+
+                    rc = rtR0DbgKrnlNtInit(&NewModInfo);
+                    if (RT_SUCCESS(rc))
+                    {
+                        Assert(NewModInfo.fOkay);
+                        uint32_t iModule = pThis->cModules;
+                        if (iModule >= RT_ELEMENTS(pThis->aModules))
+                        {
+                            iModule = RT_ELEMENTS(pThis->aModules) - 1;
+                            memmove(&pThis->aModules[0], &pThis->aModules[1], iModule * sizeof(pThis->aModules[0]));
+                        }
+                        pThis->aModules[iModule] = NewModInfo;
+                        pThis->cModules          = iModule + 1;
+                        pModInfo = &pThis->aModules[iModule];
+                        rc = VINF_SUCCESS;
+                    }
+                }
+                else
+                {
+                    AssertMsgFailed(("cchModule=%zu pszModule=%s\n", cchModule, pszModule));
+                    rc = VERR_FILENAME_TOO_LONG;
+                }
+            }
+        }
+        if (pModInfo)
+        {
+            rc = rtR0DbgKrnlInfoLookupSymbol(pModInfo, pszSymbol, ppvSymbol);
+            RTR0DBG_NT_DEBUG_LOG(("RTR0DbgKrnlInfoQuerySymbol: #3 returns %d *ppvSymbol=%p\n", rc, *ppvSymbol));
+        }
+    }
+    return rc;
+}
+
+
+/**
+ * Queries the size of a type.  Not implemented on NT - this stub only
+ * validates its inputs and always returns VERR_NOT_FOUND.
+ *
+ * @returns VERR_NOT_FOUND (or VERR_INVALID_HANDLE/VERR_INVALID_POINTER).
+ */
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySize(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszType, size_t *pcbType)
+{
+    RTDBGKRNLINFOINT *pThis = hKrnlInfo;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertPtrNullReturn(pszModule, VERR_INVALID_POINTER);
+    AssertPtrReturn(pszType, VERR_INVALID_POINTER);
+    AssertPtrReturn(pcbType, VERR_INVALID_POINTER);
+    return VERR_NOT_FOUND;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp
new file mode 100644
index 00000000..33820e8c
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp
@@ -0,0 +1,507 @@
+/* $Id: initterm-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Initialization & Termination, R0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/dbg.h>
+#include <iprt/errcore.h>
+#include <iprt/string.h>
+#include "internal/initterm.h"
+#include "internal-r0drv-nt.h"
+#include "symdb.h"
+#include "symdbdata.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** ExAllocatePoolWithTag, introduced in W2K. */
+decltype(ExAllocatePoolWithTag) *g_pfnrtExAllocatePoolWithTag;
+/** ExFreePoolWithTag, introduced in W2K. */
+decltype(ExFreePoolWithTag) *g_pfnrtExFreePoolWithTag;
+/** ExSetTimerResolution, introduced in W2K. */
+PFNMYEXSETTIMERRESOLUTION g_pfnrtNtExSetTimerResolution;
+/** KeFlushQueuedDpcs, introduced in XP. */
+PFNMYKEFLUSHQUEUEDDPCS g_pfnrtNtKeFlushQueuedDpcs;
+/** HalRequestIpi, version introduced with Windows 7. */
+PFNHALREQUESTIPI_W7PLUS g_pfnrtHalRequestIpiW7Plus;
+/** HalRequestIpi, version valid up to Windows Vista (to be confirmed). */
+PFNHALREQUESTIPI_PRE_W7 g_pfnrtHalRequestIpiPreW7;
+/** Worker for RTMpPokeCpu. */
+PFNRTSENDIPI g_pfnrtMpPokeCpuWorker;
+/** KeIpiGenericCall - Introduced in Windows Server 2003. */
+PFNRTKEIPIGENERICCALL g_pfnrtKeIpiGenericCall;
+/** KeSetTargetProcessorDpcEx - Introduced in Windows 7. */
+PFNKESETTARGETPROCESSORDPCEX g_pfnrtKeSetTargetProcessorDpcEx;
+/** KeInitializeAffinityEx - Introduced in Windows 7. */
+PFNKEINITIALIZEAFFINITYEX g_pfnrtKeInitializeAffinityEx;
+/** KeAddProcessorAffinityEx - Introduced in Windows 7. */
+PFNKEADDPROCESSORAFFINITYEX g_pfnrtKeAddProcessorAffinityEx;
+/** KeGetProcessorIndexFromNumber - Introduced in Windows 7. */
+PFNKEGETPROCESSORINDEXFROMNUMBER g_pfnrtKeGetProcessorIndexFromNumber;
+/** KeGetProcessorNumberFromIndex - Introduced in Windows 7. */
+PFNKEGETPROCESSORNUMBERFROMINDEX g_pfnrtKeGetProcessorNumberFromIndex;
+/** KeGetCurrentProcessorNumberEx - Introduced in Windows 7. */
+PFNKEGETCURRENTPROCESSORNUMBEREX g_pfnrtKeGetCurrentProcessorNumberEx;
+/** KeQueryActiveProcessors - Introduced in Windows 2000. */
+PFNKEQUERYACTIVEPROCESSORS g_pfnrtKeQueryActiveProcessors;
+/** KeQueryMaximumProcessorCount - Introduced in Vista and obsoleted W7. */
+PFNKEQUERYMAXIMUMPROCESSORCOUNT g_pfnrtKeQueryMaximumProcessorCount;
+/** KeQueryMaximumProcessorCountEx - Introduced in Windows 7. */
+PFNKEQUERYMAXIMUMPROCESSORCOUNTEX g_pfnrtKeQueryMaximumProcessorCountEx;
+/** KeQueryMaximumGroupCount - Introduced in Windows 7. */
+PFNKEQUERYMAXIMUMGROUPCOUNT g_pfnrtKeQueryMaximumGroupCount;
+/** KeQueryActiveProcessorCount - Introduced in Vista and obsoleted W7. */
+PFNKEQUERYACTIVEPROCESSORCOUNT g_pfnrtKeQueryActiveProcessorCount;
+/** KeQueryActiveProcessorCountEx - Introduced in Windows 7. */
+PFNKEQUERYACTIVEPROCESSORCOUNTEX g_pfnrtKeQueryActiveProcessorCountEx;
+/** KeQueryLogicalProcessorRelationship - Introduced in Windows 7. */
+PFNKEQUERYLOGICALPROCESSORRELATIONSHIP g_pfnrtKeQueryLogicalProcessorRelationship;
+/** KeRegisterProcessorChangeCallback - Introduced in Windows 7. */
+PFNKEREGISTERPROCESSORCHANGECALLBACK g_pfnrtKeRegisterProcessorChangeCallback;
+/** KeDeregisterProcessorChangeCallback - Introduced in Windows 7. */
+PFNKEDEREGISTERPROCESSORCHANGECALLBACK g_pfnrtKeDeregisterProcessorChangeCallback;
+/** KeSetImportanceDpc - Introduced in NT 3.51. */
+decltype(KeSetImportanceDpc) *g_pfnrtKeSetImportanceDpc;
+/** KeSetTargetProcessorDpc - Introduced in NT 3.51. */
+decltype(KeSetTargetProcessorDpc) *g_pfnrtKeSetTargetProcessorDpc;
+/** KeInitializeTimerEx - Introduced in NT 4. */
+decltype(KeInitializeTimerEx) *g_pfnrtKeInitializeTimerEx;
+/** KeShouldYieldProcessor - Introduced in Windows 10. */
+PFNKESHOULDYIELDPROCESSOR g_pfnrtKeShouldYieldProcessor;
+/** Pointer to the MmProtectMdlSystemAddress kernel function if it's available.
+ * This API was introduced in XP. */
+decltype(MmProtectMdlSystemAddress) *g_pfnrtMmProtectMdlSystemAddress;
+/** MmAllocatePagesForMdl - Introduced in Windows 2000. */
+decltype(MmAllocatePagesForMdl) *g_pfnrtMmAllocatePagesForMdl;
+/** MmFreePagesFromMdl - Introduced in Windows 2000. */
+decltype(MmFreePagesFromMdl) *g_pfnrtMmFreePagesFromMdl;
+/** MmMapLockedPagesSpecifyCache - Introduced in Windows NT4 SP4. */
+decltype(MmMapLockedPagesSpecifyCache) *g_pfnrtMmMapLockedPagesSpecifyCache;
+/** MmAllocateContiguousMemorySpecifyCache - Introduced in Windows 2000. */
+decltype(MmAllocateContiguousMemorySpecifyCache) *g_pfnrtMmAllocateContiguousMemorySpecifyCache;
+/** MmSecureVirtualMemory - Introduced in NT 3.51. */
+decltype(MmSecureVirtualMemory) *g_pfnrtMmSecureVirtualMemory;
+/** MmUnsecureVirtualMemory - Introduced in NT 3.51. */
+decltype(MmUnsecureVirtualMemory) *g_pfnrtMmUnsecureVirtualMemory;
+/** RtlGetVersion - introduction version unknown (TODO: look up). */
+PFNRTRTLGETVERSION g_pfnrtRtlGetVersion;
+#ifdef RT_ARCH_X86
+/** KeQueryInterruptTime - exported/new in Windows 2000. */
+PFNRTKEQUERYINTERRUPTTIME g_pfnrtKeQueryInterruptTime;
+#endif
+/** KeQueryInterruptTimePrecise - new in Windows 8. */
+PFNRTKEQUERYINTERRUPTTIMEPRECISE g_pfnrtKeQueryInterruptTimePrecise;
+/** KeQuerySystemTimePrecise - new in Windows 8. */
+PFNRTKEQUERYSYSTEMTIMEPRECISE g_pfnrtKeQuerySystemTimePrecise;
+
+/** Offset of the _KPRCB::QuantumEnd field. 0 if not found. */
+uint32_t g_offrtNtPbQuantumEnd;
+/** Size of the _KPRCB::QuantumEnd field. 0 if not found. */
+uint32_t g_cbrtNtPbQuantumEnd;
+/** Offset of the _KPRCB::DpcQueueDepth field. 0 if not found. */
+uint32_t g_offrtNtPbDpcQueueDepth;
+
+/** The combined NT version, see RTNT_MAKE_VERSION.
+ * Preset to NT 4.0; corrected by rtR0InitNative. */
+uint32_t g_uRtNtVersion = RTNT_MAKE_VERSION(4, 0);
+/** The major version number. */
+uint8_t g_uRtNtMajorVer;
+/** The minor version number. */
+uint8_t g_uRtNtMinorVer;
+/** The build number. */
+uint32_t g_uRtNtBuildNo;
+
+/** Pointer to the MmHighestUserAddress kernel variable - can be NULL. */
+uintptr_t const *g_puRtMmHighestUserAddress;
+/** Pointer to the MmSystemRangeStart kernel variable - can be NULL. */
+uintptr_t const *g_puRtMmSystemRangeStart;
+
+
+/**
+ * Determines the NT kernel version information.
+ *
+ * @param pOsVerInfo Where to return the version information.
+ *
+ * @remarks pOsVerInfo->fSmp is only definitive if @c true.
+ * @remarks pOsVerInfo->uCsdNo is set to MY_NIL_CSD if it cannot be determined.
+ */
+static void rtR0NtGetOsVersionInfo(PRTNTSDBOSVER pOsVerInfo)
+{
+    ULONG ulMajorVersion = 0;
+    ULONG ulMinorVersion = 0;
+    ULONG ulBuildNumber = 0;
+
+    /* PsGetVersion returns TRUE when running on a checked (debug) kernel build. */
+    pOsVerInfo->fChecked = PsGetVersion(&ulMajorVersion, &ulMinorVersion, &ulBuildNumber, NULL) == TRUE;
+    pOsVerInfo->uMajorVer = (uint8_t)ulMajorVersion;
+    pOsVerInfo->uMinorVer = (uint8_t)ulMinorVersion;
+    pOsVerInfo->uBuildNo = ulBuildNumber;
+/** Marker value meaning the service pack (CSD) number could not be determined. */
+#define MY_NIL_CSD 0x3f
+    pOsVerInfo->uCsdNo = MY_NIL_CSD;
+
+    /* The service pack number is only obtainable via RtlGetVersion, which may
+       not be resolved yet (rtR0InitNative calls us both before and after). */
+    if (g_pfnrtRtlGetVersion)
+    {
+        RTL_OSVERSIONINFOEXW VerInfo;
+        RT_ZERO(VerInfo);
+        VerInfo.dwOSVersionInfoSize = sizeof(VerInfo);
+
+        NTSTATUS rcNt = g_pfnrtRtlGetVersion(&VerInfo);
+        if (NT_SUCCESS(rcNt))
+            pOsVerInfo->uCsdNo = VerInfo.wServicePackMajor;
+    }
+
+    /* Note! We cannot quite say if something is MP or UNI. So, fSmp is
+       redefined to indicate that it must be MP.
+       Note! RTMpGetCount is not available here. */
+    pOsVerInfo->fSmp = ulMajorVersion >= 6; /* Vista and later has no UNI kernel AFAIK. */
+    if (!pOsVerInfo->fSmp)
+    {
+        /* Fall back through progressively older CPU counting APIs; any count
+           above one proves the kernel must be an MP build. */
+        if ( g_pfnrtKeQueryMaximumProcessorCountEx
+            && g_pfnrtKeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS) > 1)
+            pOsVerInfo->fSmp = true;
+        else if ( g_pfnrtKeQueryMaximumProcessorCount
+            && g_pfnrtKeQueryMaximumProcessorCount() > 1)
+            pOsVerInfo->fSmp = true;
+        else if ( g_pfnrtKeQueryActiveProcessors
+            && g_pfnrtKeQueryActiveProcessors() > 1)
+            pOsVerInfo->fSmp = true;
+        else if (KeNumberProcessors > 1)
+            pOsVerInfo->fSmp = true;
+    }
+}
+
+
+/**
+ * Tries a symbol database set against the current kernel.
+ *
+ * @retval true if it matched up, global variables are updated.
+ * @retval false otherwise (no globals updated).
+ * @param pSet The data set.
+ * @param pbPrcb Pointer to the processor control block.
+ * @param pszVendor Pointer to the processor vendor string (12 chars + NUL).
+ * @param pOsVerInfo The OS version info.
+ */
+static bool rtR0NtTryMatchSymSet(PCRTNTSDBSET pSet, uint8_t *pbPrcb, const char *pszVendor, PCRTNTSDBOSVER pOsVerInfo)
+{
+    /*
+     * Don't bother trying stuff where the NT kernel version number differs, or
+     * if the build type or SMPness doesn't match up.
+     */
+    if ( pSet->OsVerInfo.uMajorVer != pOsVerInfo->uMajorVer
+        || pSet->OsVerInfo.uMinorVer != pOsVerInfo->uMinorVer
+        || pSet->OsVerInfo.fChecked != pOsVerInfo->fChecked
+        || (!pSet->OsVerInfo.fSmp && pOsVerInfo->fSmp /*must-be-smp*/) )
+    {
+        //DbgPrint("IPRT: #%d Version/type mismatch.\n", pSet - &g_artNtSdbSets[0]);
+        return false;
+    }
+
+    /*
+     * Do the CPU vendor test.
+     *
+     * Note! The MmIsAddressValid call is the real #PF security here as the
+     *       __try/__except has limited/no ability to catch everything we need.
+     */
+    char *pszPrcbVendorString = (char *)&pbPrcb[pSet->KPRCB.offVendorString];
+    /* Probe the last byte of the 12-char vendor string before touching it. */
+    if (!MmIsAddressValid(&pszPrcbVendorString[4 * 3 - 1]))
+    {
+        //DbgPrint("IPRT: #%d invalid vendor string address.\n", pSet - &g_artNtSdbSets[0]);
+        return false;
+    }
+    __try
+    {
+        if (memcmp(pszPrcbVendorString, pszVendor, RT_MIN(4 * 3, pSet->KPRCB.cbVendorString)) != 0)
+        {
+            //DbgPrint("IPRT: #%d Vendor string mismatch.\n", pSet - &g_artNtSdbSets[0]);
+            return false;
+        }
+    }
+    __except(EXCEPTION_EXECUTE_HANDLER)
+    {
+        /* NOTE(review): "%#d" looks like a typo for "#%d" (compare the
+           commented-out DbgPrint calls above); also a pointer difference is
+           passed for %d - harmless at the array sizes involved, but worth
+           confirming upstream. */
+        DbgPrint("IPRT: %#d Exception\n", pSet - &g_artNtSdbSets[0]);
+        return false;
+    }
+
+    /*
+     * Got a match, update the global variables and report success.
+     */
+    g_offrtNtPbQuantumEnd = pSet->KPRCB.offQuantumEnd;
+    g_cbrtNtPbQuantumEnd = pSet->KPRCB.cbQuantumEnd;
+    g_offrtNtPbDpcQueueDepth = pSet->KPRCB.offDpcQueueDepth;
+
+#if 0
+    DbgPrint("IPRT: Using data set #%u for %u.%usp%u build %u %s %s.\n",
+             pSet - &g_artNtSdbSets[0],
+             pSet->OsVerInfo.uMajorVer,
+             pSet->OsVerInfo.uMinorVer,
+             pSet->OsVerInfo.uCsdNo,
+             pSet->OsVerInfo.uBuildNo,
+             pSet->OsVerInfo.fSmp ? "smp" : "uni",
+             pSet->OsVerInfo.fChecked ? "checked" : "free");
+#endif
+    return true;
+}
+
+
+/**
+ * NT specific ring-0 initialization: resolves optional kernel exports into
+ * the g_pfnrt* pointers, publishes the NT version (g_uRtNtVersion & friends),
+ * locates the _KPRCB::QuantumEnd / DpcQueueDepth offsets via the built-in
+ * symbol database (g_artNtSdbSets), and initializes the MP support.
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+    /*
+     * Preinitialize g_uRtNtVersion so RTMemAlloc uses the right kind of pool
+     * when RTR0DbgKrnlInfoOpen calls it.
+     */
+    RTNTSDBOSVER OsVerInfo;
+    rtR0NtGetOsVersionInfo(&OsVerInfo);
+    g_uRtNtVersion = RTNT_MAKE_VERSION(OsVerInfo.uMajorVer, OsVerInfo.uMinorVer);
+    g_uRtNtMinorVer = OsVerInfo.uMinorVer;
+    g_uRtNtMajorVer = OsVerInfo.uMajorVer;
+    g_uRtNtBuildNo = OsVerInfo.uBuildNo;
+
+    /*
+     * Initialize the function pointers.
+     */
+    RTDBGKRNLINFO hKrnlInfo;
+    int rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0/*fFlags*/);
+    AssertRCReturn(rc, rc);
+
+/* Helper macros: resolve a kernel export by name into the matching g_pfnrt*
+   pointer; the pointer is left NULL when the running kernel lacks the export. */
+#define GET_SYSTEM_ROUTINE_EX(a_Prf, a_Name, a_pfnType) \
+    do { RT_CONCAT3(g_pfnrt, a_Prf, a_Name) = (a_pfnType)RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, #a_Name); } while (0)
+#define GET_SYSTEM_ROUTINE(a_Name) GET_SYSTEM_ROUTINE_EX(RT_NOTHING, a_Name, decltype(a_Name) *)
+#define GET_SYSTEM_ROUTINE_PRF(a_Prf,a_Name) GET_SYSTEM_ROUTINE_EX(a_Prf, a_Name, decltype(a_Name) *)
+#define GET_SYSTEM_ROUTINE_TYPE(a_Name, a_pfnType) GET_SYSTEM_ROUTINE_EX(RT_NOTHING, a_Name, a_pfnType)
+
+    GET_SYSTEM_ROUTINE(ExAllocatePoolWithTag);
+    GET_SYSTEM_ROUTINE(ExFreePoolWithTag);
+    GET_SYSTEM_ROUTINE_PRF(Nt,ExSetTimerResolution);
+    GET_SYSTEM_ROUTINE_PRF(Nt,KeFlushQueuedDpcs);
+    GET_SYSTEM_ROUTINE(KeIpiGenericCall);
+    GET_SYSTEM_ROUTINE(KeSetTargetProcessorDpcEx);
+    GET_SYSTEM_ROUTINE(KeInitializeAffinityEx);
+    GET_SYSTEM_ROUTINE(KeAddProcessorAffinityEx);
+    GET_SYSTEM_ROUTINE_TYPE(KeGetProcessorIndexFromNumber, PFNKEGETPROCESSORINDEXFROMNUMBER);
+    GET_SYSTEM_ROUTINE(KeGetProcessorNumberFromIndex);
+    GET_SYSTEM_ROUTINE_TYPE(KeGetCurrentProcessorNumberEx, PFNKEGETCURRENTPROCESSORNUMBEREX);
+    GET_SYSTEM_ROUTINE(KeQueryActiveProcessors);
+    GET_SYSTEM_ROUTINE(KeQueryMaximumProcessorCount);
+    GET_SYSTEM_ROUTINE(KeQueryMaximumProcessorCountEx);
+    GET_SYSTEM_ROUTINE(KeQueryMaximumGroupCount);
+    GET_SYSTEM_ROUTINE(KeQueryActiveProcessorCount);
+    GET_SYSTEM_ROUTINE(KeQueryActiveProcessorCountEx);
+    GET_SYSTEM_ROUTINE(KeQueryLogicalProcessorRelationship);
+    GET_SYSTEM_ROUTINE(KeRegisterProcessorChangeCallback);
+    GET_SYSTEM_ROUTINE(KeDeregisterProcessorChangeCallback);
+    GET_SYSTEM_ROUTINE(KeSetImportanceDpc);
+    GET_SYSTEM_ROUTINE(KeSetTargetProcessorDpc);
+    GET_SYSTEM_ROUTINE(KeInitializeTimerEx);
+    GET_SYSTEM_ROUTINE_TYPE(KeShouldYieldProcessor, PFNKESHOULDYIELDPROCESSOR);
+    GET_SYSTEM_ROUTINE(MmProtectMdlSystemAddress);
+    GET_SYSTEM_ROUTINE(MmAllocatePagesForMdl);
+    GET_SYSTEM_ROUTINE(MmFreePagesFromMdl);
+    GET_SYSTEM_ROUTINE(MmMapLockedPagesSpecifyCache);
+    GET_SYSTEM_ROUTINE(MmAllocateContiguousMemorySpecifyCache);
+    GET_SYSTEM_ROUTINE(MmSecureVirtualMemory);
+    GET_SYSTEM_ROUTINE(MmUnsecureVirtualMemory);
+
+    GET_SYSTEM_ROUTINE_TYPE(RtlGetVersion, PFNRTRTLGETVERSION);
+#ifdef RT_ARCH_X86
+    GET_SYSTEM_ROUTINE(KeQueryInterruptTime);
+#endif
+    GET_SYSTEM_ROUTINE_TYPE(KeQueryInterruptTimePrecise, PFNRTKEQUERYINTERRUPTTIMEPRECISE);
+    GET_SYSTEM_ROUTINE_TYPE(KeQuerySystemTimePrecise, PFNRTKEQUERYSYSTEMTIMEPRECISE);
+
+    /* Both HalRequestIpi variants point at the same export; which prototype is
+       the correct one depends on the NT version determined above. */
+    g_pfnrtHalRequestIpiW7Plus = (PFNHALREQUESTIPI_W7PLUS)RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, "HalRequestIpi");
+    g_pfnrtHalRequestIpiPreW7 = (PFNHALREQUESTIPI_PRE_W7)g_pfnrtHalRequestIpiW7Plus;
+
+    g_puRtMmHighestUserAddress = (uintptr_t const *)RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, "MmHighestUserAddress");
+    g_puRtMmSystemRangeStart = (uintptr_t const *)RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, "MmSystemRangeStart");
+
+#ifdef RT_ARCH_X86
+    rc = rtR0Nt3InitSymbols(hKrnlInfo);
+    RTR0DbgKrnlInfoRelease(hKrnlInfo);
+    if (RT_FAILURE(rc))
+        return rc;
+#else
+    RTR0DbgKrnlInfoRelease(hKrnlInfo);
+#endif
+
+    /*
+     * Get and publish the definitive NT version.
+     * (Now that g_pfnrtRtlGetVersion may be resolved, the CSD number can be
+     * filled in too - see rtR0NtGetOsVersionInfo.)
+     */
+    rtR0NtGetOsVersionInfo(&OsVerInfo);
+    g_uRtNtVersion = RTNT_MAKE_VERSION(OsVerInfo.uMajorVer, OsVerInfo.uMinorVer);
+    g_uRtNtMinorVer = OsVerInfo.uMinorVer;
+    g_uRtNtMajorVer = OsVerInfo.uMajorVer;
+    g_uRtNtBuildNo = OsVerInfo.uBuildNo;
+
+
+    /*
+     * HACK ALERT! (and déjà vu warning - remember win32k.sys on OS/2?)
+     *
+     * Try find _KPRCB::QuantumEnd and _KPRCB::[DpcData.]DpcQueueDepth.
+     * For purpose of verification we use the VendorString member (12+1 chars).
+     *
+     * The offsets was initially derived by poking around with windbg
+     * (dt _KPRCB, !prcb ++, and such like). Systematic harvesting was then
+     * planned using dia2dump, grep and the symbol pack in a manner like this:
+     *   dia2dump -type _KDPC_DATA -type _KPRCB EXE\ntkrnlmp.pdb | grep -wE "QuantumEnd|DpcData|DpcQueueDepth|VendorString"
+     *
+     * The final solution ended up using a custom harvester program called
+     * ntBldSymDb that recursively searches thru unpacked symbol packages for
+     * the desired structure offsets. The program assumes that the packages
+     * are unpacked into directories with the same name as the package, with
+     * exception of some of the w2k packages which requires a 'w2k' prefix to
+     * be distinguishable from another.
+     */
+
+    /*
+     * Gather consistent CPU vendor string and PRCB pointers.
+     */
+    KIRQL OldIrql;
+    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql); /* make sure we stay on the same cpu */
+
+    union
+    {
+        uint32_t auRegs[4];
+        char szVendor[4*3+1];
+    } u;
+    /* CPUID leaf 0: vendor string in EBX:EDX:ECX order. */
+    ASMCpuId(0, &u.auRegs[3], &u.auRegs[0], &u.auRegs[2], &u.auRegs[1]);
+    u.szVendor[4*3] = '\0';
+
+    uint8_t *pbPrcb;
+    __try /* Warning. This try/except statement may provide some false safety. */
+    {
+#if defined(RT_ARCH_X86)
+        PKPCR pPcr = (PKPCR)__readfsdword(RT_UOFFSETOF(KPCR,SelfPcr));
+        pbPrcb = (uint8_t *)pPcr->Prcb;
+#elif defined(RT_ARCH_AMD64)
+        PKPCR pPcr = (PKPCR)__readgsqword(RT_UOFFSETOF(KPCR,Self));
+        pbPrcb = (uint8_t *)pPcr->CurrentPrcb;
+#else
+# error "port me"
+        pbPrcb = NULL;
+#endif
+    }
+    __except(EXCEPTION_EXECUTE_HANDLER)
+    {
+        pbPrcb = NULL;
+    }
+
+    /*
+     * Search the database
+     */
+    if (pbPrcb)
+    {
+        /* Find the best matching kernel version based on build number. */
+        uint32_t iBest = UINT32_MAX;
+        int32_t iBestDelta = INT32_MAX;
+        for (uint32_t i = 0; i < RT_ELEMENTS(g_artNtSdbSets); i++)
+        {
+            if (g_artNtSdbSets[i].OsVerInfo.fChecked != OsVerInfo.fChecked)
+                continue;
+            if (OsVerInfo.fSmp /*must-be-smp*/ && !g_artNtSdbSets[i].OsVerInfo.fSmp)
+                continue;
+
+            int32_t iDelta = RT_ABS((int32_t)OsVerInfo.uBuildNo - (int32_t)g_artNtSdbSets[i].OsVerInfo.uBuildNo);
+            if ( iDelta == 0
+                && (g_artNtSdbSets[i].OsVerInfo.uCsdNo == OsVerInfo.uCsdNo || OsVerInfo.uCsdNo == MY_NIL_CSD))
+            {
+                /* perfect match - build and service pack both agree */
+                iBestDelta = iDelta;
+                iBest = i;
+                break;
+            }
+            /* Otherwise prefer the closest build; break build-number ties on
+               the closest service pack number (when ours is known). */
+            if ( iDelta < iBestDelta
+                || iBest == UINT32_MAX
+                || ( iDelta == iBestDelta
+                    && OsVerInfo.uCsdNo != MY_NIL_CSD
+                    && RT_ABS(g_artNtSdbSets[i ].OsVerInfo.uCsdNo - (int32_t)OsVerInfo.uCsdNo)
+                    < RT_ABS(g_artNtSdbSets[iBest].OsVerInfo.uCsdNo - (int32_t)OsVerInfo.uCsdNo)
+                    )
+                )
+            {
+                iBestDelta = iDelta;
+                iBest = i;
+            }
+        }
+        if (iBest < RT_ELEMENTS(g_artNtSdbSets))
+        {
+            /* Try all sets: iBest -> End; iBest -> Start. */
+            bool fDone = false;
+            int32_t i = iBest;
+            while ( i < RT_ELEMENTS(g_artNtSdbSets)
+                && !(fDone = rtR0NtTryMatchSymSet(&g_artNtSdbSets[i], pbPrcb, u.szVendor, &OsVerInfo)))
+                i++;
+            if (!fDone)
+            {
+                i = (int32_t)iBest - 1;
+                while ( i >= 0
+                    && !(fDone = rtR0NtTryMatchSymSet(&g_artNtSdbSets[i], pbPrcb, u.szVendor, &OsVerInfo)))
+                    i--;
+            }
+        }
+        else
+            DbgPrint("IPRT: Failed to locate data set.\n");
+    }
+    else
+        DbgPrint("IPRT: Failed to get PCBR pointer.\n"); /* NOTE(review): "PCBR" should probably read "PRCB". */
+
+    KeLowerIrql(OldIrql); /* Lowering the IRQL early in the hope that we may catch exceptions below. */
+
+#ifndef IN_GUEST
+    /* NOTE(review): the message below contains a double negative ("Neither ...
+       was not found"); it should presumably read "... was found". */
+    if (!g_offrtNtPbQuantumEnd && !g_offrtNtPbDpcQueueDepth)
+        DbgPrint("IPRT: Neither _KPRCB::QuantumEnd nor _KPRCB::DpcQueueDepth was not found! Kernel %u.%u %u %s\n",
+                 OsVerInfo.uMajorVer, OsVerInfo.uMinorVer, OsVerInfo.uBuildNo, OsVerInfo.fChecked ? "checked" : "free");
+# ifdef DEBUG
+    else
+        /* NOTE(review): g_offrtNtPbDpcQueueDepth is passed for both the %x and
+           the %d of ".DpcQueueDepth=%x/%d"; the second argument was probably
+           meant to be a size field (cf. the QuantumEnd pair) - confirm. */
+        DbgPrint("IPRT: _KPRCB:{.QuantumEnd=%x/%d, .DpcQueueDepth=%x/%d} Kernel %u.%u %u %s\n",
+                 g_offrtNtPbQuantumEnd, g_cbrtNtPbQuantumEnd, g_offrtNtPbDpcQueueDepth, g_offrtNtPbDpcQueueDepth,
+                 OsVerInfo.uMajorVer, OsVerInfo.uMinorVer, OsVerInfo.uBuildNo, OsVerInfo.fChecked ? "checked" : "free");
+# endif
+#endif
+
+    /*
+     * Initialize multi processor stuff. This registers a callback, so
+     * we call rtR0TermNative to do the deregistration on failure.
+     */
+    rc = rtR0MpNtInit(&OsVerInfo);
+    if (RT_FAILURE(rc))
+    {
+        rtR0TermNative();
+        DbgPrint("IPRT: Fatal: rtR0MpNtInit failed: %d\n", rc);
+        return rc;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * NT specific ring-0 termination, counterpart to rtR0InitNative.
+ *
+ * Tears down the MP support, which undoes the callback registration done by
+ * rtR0MpNtInit (see the comment in rtR0InitNative).
+ */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+    rtR0MpNtTerm();
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h b/src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h
new file mode 100644
index 00000000..56ecf233
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h
@@ -0,0 +1,139 @@
+/* $Id: internal-r0drv-nt.h $ */
+/** @file
+ * IPRT - Internal Header for the NT Ring-0 Driver Code.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_nt_internal_r0drv_nt_h
+#define IPRT_INCLUDED_SRC_r0drv_nt_internal_r0drv_nt_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/cpuset.h>
+#include <iprt/nt/nt.h>
+
+RT_C_DECLS_BEGIN
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+/** ExSetTimerResolution signature. */
+typedef ULONG (__stdcall *PFNMYEXSETTIMERRESOLUTION)(ULONG, BOOLEAN);
+/** KeFlushQueuedDpcs signature. */
+typedef VOID (__stdcall *PFNMYKEFLUSHQUEUEDDPCS)(VOID);
+/** HalSendSoftwareInterrupt signature. */
+typedef VOID (__stdcall *PFNHALSENDSOFTWAREINTERRUPT)(ULONG ProcessorNumber, KIRQL Irql);
+/** Signature shared by the rtMpPokeCpuUsing* workers (see g_pfnrtMpPokeCpuWorker). */
+typedef int (__stdcall *PFNRTSENDIPI)(RTCPUID idCpu);
+/** KeIpiGenericCall signature. */
+typedef ULONG_PTR (__stdcall *PFNRTKEIPIGENERICCALL)(PKIPI_BROADCAST_WORKER BroadcastFunction, ULONG_PTR Context);
+/** RtlGetVersion signature. */
+typedef ULONG (__stdcall *PFNRTRTLGETVERSION)(PRTL_OSVERSIONINFOEXW pVerInfo);
+#ifndef RT_ARCH_AMD64
+/** KeQueryInterruptTime signature (resolved dynamically on x86). */
+typedef ULONGLONG (__stdcall *PFNRTKEQUERYINTERRUPTTIME)(VOID);
+/** KeQuerySystemTime signature. */
+typedef VOID (__stdcall *PFNRTKEQUERYSYSTEMTIME)(PLARGE_INTEGER pTime);
+#endif
+/** KeQueryInterruptTimePrecise signature (new in Windows 8). */
+typedef ULONG64 (__stdcall *PFNRTKEQUERYINTERRUPTTIMEPRECISE)(PULONG64 pQpcTS);
+/** KeQuerySystemTimePrecise signature (new in Windows 8). */
+typedef VOID (__stdcall *PFNRTKEQUERYSYSTEMTIMEPRECISE)(PLARGE_INTEGER pTime);
+
+
+/*******************************************************************************
+* Global Variables *
+*******************************************************************************/
+extern RTCPUSET g_rtMpNtCpuSet;
+extern uint32_t g_cRtMpNtMaxGroups;
+extern uint32_t g_cRtMpNtMaxCpus;
+extern RTCPUID g_aidRtMpNtByCpuSetIdx[RTCPUSET_MAX_CPUS];
+
+extern decltype(ExAllocatePoolWithTag) *g_pfnrtExAllocatePoolWithTag;
+extern decltype(ExFreePoolWithTag) *g_pfnrtExFreePoolWithTag;
+extern PFNMYEXSETTIMERRESOLUTION g_pfnrtNtExSetTimerResolution;
+extern PFNMYKEFLUSHQUEUEDDPCS g_pfnrtNtKeFlushQueuedDpcs;
+extern PFNHALREQUESTIPI_W7PLUS g_pfnrtHalRequestIpiW7Plus;
+extern PFNHALREQUESTIPI_PRE_W7 g_pfnrtHalRequestIpiPreW7;
+extern PFNHALSENDSOFTWAREINTERRUPT g_pfnrtNtHalSendSoftwareInterrupt;
+extern PFNRTSENDIPI g_pfnrtMpPokeCpuWorker;
+extern PFNRTKEIPIGENERICCALL g_pfnrtKeIpiGenericCall;
+extern PFNKESETTARGETPROCESSORDPCEX g_pfnrtKeSetTargetProcessorDpcEx;
+extern PFNKEINITIALIZEAFFINITYEX g_pfnrtKeInitializeAffinityEx;
+extern PFNKEADDPROCESSORAFFINITYEX g_pfnrtKeAddProcessorAffinityEx;
+extern PFNKEGETPROCESSORINDEXFROMNUMBER g_pfnrtKeGetProcessorIndexFromNumber;
+extern PFNKEGETPROCESSORNUMBERFROMINDEX g_pfnrtKeGetProcessorNumberFromIndex;
+extern PFNKEGETCURRENTPROCESSORNUMBEREX g_pfnrtKeGetCurrentProcessorNumberEx;
+extern PFNKEQUERYACTIVEPROCESSORS g_pfnrtKeQueryActiveProcessors;
+extern PFNKEQUERYMAXIMUMPROCESSORCOUNT g_pfnrtKeQueryMaximumProcessorCount;
+extern PFNKEQUERYMAXIMUMPROCESSORCOUNTEX g_pfnrtKeQueryMaximumProcessorCountEx;
+extern PFNKEQUERYMAXIMUMGROUPCOUNT g_pfnrtKeQueryMaximumGroupCount;
+extern PFNKEQUERYACTIVEPROCESSORCOUNT g_pfnrtKeQueryActiveProcessorCount;
+extern PFNKEQUERYACTIVEPROCESSORCOUNTEX g_pfnrtKeQueryActiveProcessorCountEx;
+extern PFNKEQUERYLOGICALPROCESSORRELATIONSHIP g_pfnrtKeQueryLogicalProcessorRelationship;
+extern PFNKEREGISTERPROCESSORCHANGECALLBACK g_pfnrtKeRegisterProcessorChangeCallback;
+extern PFNKEDEREGISTERPROCESSORCHANGECALLBACK g_pfnrtKeDeregisterProcessorChangeCallback;
+extern decltype(KeSetImportanceDpc) *g_pfnrtKeSetImportanceDpc;
+extern decltype(KeSetTargetProcessorDpc) *g_pfnrtKeSetTargetProcessorDpc;
+extern decltype(KeInitializeTimerEx) *g_pfnrtKeInitializeTimerEx;
+extern PFNKESHOULDYIELDPROCESSOR g_pfnrtKeShouldYieldProcessor;
+extern decltype(MmProtectMdlSystemAddress) *g_pfnrtMmProtectMdlSystemAddress;
+extern decltype(MmAllocatePagesForMdl) *g_pfnrtMmAllocatePagesForMdl;
+extern decltype(MmFreePagesFromMdl) *g_pfnrtMmFreePagesFromMdl;
+extern decltype(MmMapLockedPagesSpecifyCache) *g_pfnrtMmMapLockedPagesSpecifyCache;
+extern decltype(MmAllocateContiguousMemorySpecifyCache) *g_pfnrtMmAllocateContiguousMemorySpecifyCache;
+extern decltype(MmSecureVirtualMemory) *g_pfnrtMmSecureVirtualMemory;
+extern decltype(MmUnsecureVirtualMemory) *g_pfnrtMmUnsecureVirtualMemory;
+
+extern PFNRTRTLGETVERSION g_pfnrtRtlGetVersion;
+#ifdef RT_ARCH_X86
+extern PFNRTKEQUERYINTERRUPTTIME g_pfnrtKeQueryInterruptTime;
+#endif
+extern PFNRTKEQUERYINTERRUPTTIMEPRECISE g_pfnrtKeQueryInterruptTimePrecise;
+extern PFNRTKEQUERYSYSTEMTIMEPRECISE g_pfnrtKeQuerySystemTimePrecise;
+
+extern uint32_t g_offrtNtPbQuantumEnd;
+extern uint32_t g_cbrtNtPbQuantumEnd;
+extern uint32_t g_offrtNtPbDpcQueueDepth;
+
+/** Makes an NT version for checking against g_uRtNtVersion. */
+#define RTNT_MAKE_VERSION(uMajor, uMinor) RT_MAKE_U32(uMinor, uMajor)
+
+extern uint32_t g_uRtNtVersion;
+extern uint8_t g_uRtNtMajorVer;
+extern uint8_t g_uRtNtMinorVer;
+extern uint32_t g_uRtNtBuildNo;
+
+extern uintptr_t const *g_puRtMmHighestUserAddress;
+extern uintptr_t const *g_puRtMmSystemRangeStart;
+
+
+/** @name RTMpPokeCpu worker candidates.
+ * Presumably one of these gets selected into g_pfnrtMpPokeCpuWorker (they all
+ * match the PFNRTSENDIPI signature) - confirm against mp-r0drv-nt.cpp.
+ * NOTE(review): "Reqest" in the last two identifiers is a typo for "Request";
+ * renaming would touch definitions and callers in other files, so it is left
+ * as-is here.
+ * @{ */
+int __stdcall rtMpPokeCpuUsingFailureNotSupported(RTCPUID idCpu);
+int __stdcall rtMpPokeCpuUsingDpc(RTCPUID idCpu);
+int __stdcall rtMpPokeCpuUsingBroadcastIpi(RTCPUID idCpu);
+int __stdcall rtMpPokeCpuUsingHalReqestIpiW7Plus(RTCPUID idCpu);
+int __stdcall rtMpPokeCpuUsingHalReqestIpiPreW7(RTCPUID idCpu);
+/** @} */
+
+struct RTNTSDBOSVER;
+/** Initializes the NT MP support; registers a callback that rtR0MpNtTerm undoes. */
+DECLHIDDEN(int) rtR0MpNtInit(struct RTNTSDBOSVER const *pOsVerInfo);
+/** Counterpart to rtR0MpNtInit. */
+DECLHIDDEN(void) rtR0MpNtTerm(void);
+/** Sets the target processor of @a pDpc to the CPU given by @a idCpu. */
+DECLHIDDEN(int) rtMpNtSetTargetProcessorDpc(KDPC *pDpc, RTCPUID idCpu);
+#if defined(RT_ARCH_X86) && defined(NIL_RTDBGKRNLINFO)
+/** NT 3.x symbol resolution helper (x86 only, needs iprt/dbg.h included first). */
+DECLHIDDEN(int) rtR0Nt3InitSymbols(RTDBGKRNLINFO hKrnlInfo);
+#endif
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_nt_internal_r0drv_nt_h */
+
diff --git a/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
new file mode 100644
index 00000000..a65513a8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
@@ -0,0 +1,1010 @@
+/* $Id: memobj-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/memobj.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+#include "internal-r0drv-nt.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Maximum number of bytes we try to lock down in one go.
+ * This is supposed to have a limit right below 256MB, but this appears
+ * to actually be much lower. The values here have been determined experimentally.
+ */
+#ifdef RT_ARCH_X86
+# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
+#endif
+#ifdef RT_ARCH_AMD64
+# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The NT version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJNT
+{
+ /** The core structure. */
+ RTR0MEMOBJINTERNAL Core;
+ /** Used MmAllocatePagesForMdl(). */
+ bool fAllocatedPagesForMdl;
+ /** Pointer returned by MmSecureVirtualMemory */
+ PVOID pvSecureMem;
+ /** The number of PMDLs (memory descriptor lists) in the array. */
+ uint32_t cMdls;
+ /** Array of MDL pointers. (variable size) */
+ PMDL apMdls[1];
+} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
+
+
+
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
+
+ /*
+ * Deal with it on a per type basis (just as a variation).
+ */
+ switch (pMemNt->Core.enmType)
+ {
+ case RTR0MEMOBJTYPE_LOW:
+ if (pMemNt->fAllocatedPagesForMdl)
+ {
+ Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+ MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
+ pMemNt->Core.pv = NULL;
+ if (pMemNt->pvSecureMem)
+ {
+ g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
+ pMemNt->pvSecureMem = NULL;
+ }
+
+ g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
+ ExFreePool(pMemNt->apMdls[0]);
+ pMemNt->apMdls[0] = NULL;
+ pMemNt->cMdls = 0;
+ break;
+ }
+ AssertFailed();
+ break;
+
+ case RTR0MEMOBJTYPE_PAGE:
+ Assert(pMemNt->Core.pv);
+ if (g_pfnrtExFreePoolWithTag)
+ g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
+ else
+ ExFreePool(pMemNt->Core.pv);
+ pMemNt->Core.pv = NULL;
+
+ Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+ IoFreeMdl(pMemNt->apMdls[0]);
+ pMemNt->apMdls[0] = NULL;
+ pMemNt->cMdls = 0;
+ break;
+
+ case RTR0MEMOBJTYPE_CONT:
+ Assert(pMemNt->Core.pv);
+ MmFreeContiguousMemory(pMemNt->Core.pv);
+ pMemNt->Core.pv = NULL;
+
+ Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+ IoFreeMdl(pMemNt->apMdls[0]);
+ pMemNt->apMdls[0] = NULL;
+ pMemNt->cMdls = 0;
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ /* rtR0MemObjNativeEnterPhys? */
+ if (!pMemNt->Core.u.Phys.fAllocated)
+ {
+ Assert(!pMemNt->fAllocatedPagesForMdl);
+ /* Nothing to do here. */
+ break;
+ }
+ RT_FALL_THRU();
+
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ if (pMemNt->fAllocatedPagesForMdl)
+ {
+ g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
+ ExFreePool(pMemNt->apMdls[0]);
+ pMemNt->apMdls[0] = NULL;
+ pMemNt->cMdls = 0;
+ break;
+ }
+ AssertFailed();
+ break;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ if (pMemNt->pvSecureMem)
+ {
+ g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
+ pMemNt->pvSecureMem = NULL;
+ }
+ for (uint32_t i = 0; i < pMemNt->cMdls; i++)
+ {
+ MmUnlockPages(pMemNt->apMdls[i]);
+ IoFreeMdl(pMemNt->apMdls[i]);
+ pMemNt->apMdls[i] = NULL;
+ }
+ break;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
+ {
+ }
+ else
+ {
+ }*/
+ AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
+ return VERR_INTERNAL_ERROR;
+ break;
+
+ case RTR0MEMOBJTYPE_MAPPING:
+ {
+ Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
+ PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
+ Assert(pMemNtParent);
+ if (pMemNtParent->cMdls)
+ {
+ Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
+ Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
+ || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
+ MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
+ }
+ else
+ {
+ Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
+ && !pMemNtParent->Core.u.Phys.fAllocated);
+ Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
+ MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
+ }
+ pMemNt->Core.pv = NULL;
+ break;
+ }
+
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
+ return VERR_INTERNAL_ERROR;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates page-backed kernel memory (NT: non-paged pool) and wraps it in an
+ * IPRT memory object together with an MDL describing the pages.
+ *
+ * @returns IPRT status code (VERR_NO_PAGE_MEMORY / VERR_NO_MEMORY on failure).
+ * @param   ppMem       Where to store the pointer to the new memory object.
+ * @param   cb          Number of bytes to allocate; capped at 1GB so the
+ *                      size_t -> ULONG conversion for IoAllocateMdl is safe.
+ * @param   fExecutable Ignored here (RT_NOREF1 below); NOTE(review): on AMD64
+ *                      the region is unconditionally made
+ *                      PAGE_EXECUTE_READWRITE regardless of this flag.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+    RT_NOREF1(fExecutable);
+
+    /*
+     * Try allocate the memory and create an MDL for them so
+     * we can query the physical addresses and do mappings later
+     * without running into out-of-memory conditions and similar problems.
+     */
+    int rc = VERR_NO_PAGE_MEMORY;
+    void *pv;
+    /* Prefer the tagged pool API when its import was resolved (pool tagging aids leak tracking). */
+    if (g_pfnrtExAllocatePoolWithTag)
+        pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
+    else
+        pv = ExAllocatePool(NonPagedPool, cb);
+    if (pv)
+    {
+        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
+        if (pMdl)
+        {
+            /* Non-paged pool is already resident; build the MDL without probe/lock. */
+            MmBuildMdlForNonPagedPool(pMdl);
+#ifdef RT_ARCH_AMD64
+            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
+#endif
+
+            /*
+             * Create the IPRT memory object.
+             */
+            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
+            if (pMemNt)
+            {
+                pMemNt->cMdls = 1;
+                pMemNt->apMdls[0] = pMdl;
+                *ppMem = &pMemNt->Core;
+                return VINF_SUCCESS;
+            }
+
+            rc = VERR_NO_MEMORY;
+            IoFreeMdl(pMdl);
+        }
+        ExFreePool(pv);
+    }
+    return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
+ /*
+ * Try see if we get lucky first...
+ * (We could probably just assume we're lucky on NT4.)
+ */
+ int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
+ if (RT_SUCCESS(rc))
+ {
+ size_t iPage = cb >> PAGE_SHIFT;
+ while (iPage-- > 0)
+ if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
+ {
+ rc = VERR_NO_LOW_MEMORY;
+ break;
+ }
+ if (RT_SUCCESS(rc))
+ return rc;
+
+ /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
+ RTR0MemObjFree(*ppMem, false);
+ *ppMem = NULL;
+ }
+
+ /*
+ * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
+ */
+ if ( g_pfnrtMmAllocatePagesForMdl
+ && g_pfnrtMmFreePagesFromMdl
+ && g_pfnrtMmMapLockedPagesSpecifyCache)
+ {
+ PHYSICAL_ADDRESS Zero;
+ Zero.QuadPart = 0;
+ PHYSICAL_ADDRESS HighAddr;
+ HighAddr.QuadPart = _4G - 1;
+ PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
+ if (pMdl)
+ {
+ if (MmGetMdlByteCount(pMdl) >= cb)
+ {
+ __try
+ {
+ void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
+ FALSE /* no bug check on failure */, NormalPagePriority);
+ if (pv)
+ {
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
+ if (pMemNt)
+ {
+ pMemNt->fAllocatedPagesForMdl = true;
+ pMemNt->cMdls = 1;
+ pMemNt->apMdls[0] = pMdl;
+ *ppMem = &pMemNt->Core;
+ return VINF_SUCCESS;
+ }
+ MmUnmapLockedPages(pv, pMdl);
+ }
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+# ifdef LOG_ENABLED
+ NTSTATUS rcNt = GetExceptionCode();
+ Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
+# endif
+ /* nothing */
+ }
+ }
+ g_pfnrtMmFreePagesFromMdl(pMdl);
+ ExFreePool(pMdl);
+ }
+ }
+
+ /*
+ * Fall back on contiguous memory...
+ */
+ return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
+}
+
+
+/**
+ * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
+ * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
+ * to what rtR0MemObjNativeAllocCont() does.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the pointer to the ring-0 memory object.
+ * @param cb The size.
+ * @param fExecutable Whether the mapping should be executable or not.
+ * @param PhysHighest The highest physical address for the pages in allocation.
+ * @param uAlignment The alignment of the physical memory to allocate.
+ * Supported values are PAGE_SIZE, _2M, _4M and _1G.
+ */
+static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
+ size_t uAlignment)
+{
+ AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+ RT_NOREF1(fExecutable);
+
+ /*
+ * Allocate the memory and create an MDL for it.
+ */
+ PHYSICAL_ADDRESS PhysAddrHighest;
+ PhysAddrHighest.QuadPart = PhysHighest;
+ void *pv;
+ if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
+ {
+ PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
+ PhysAddrLowest.QuadPart = 0;
+ PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
+ pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
+ }
+ else if (uAlignment == PAGE_SIZE)
+ pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
+ else
+ return VERR_NOT_SUPPORTED;
+ if (!pv)
+ return VERR_NO_MEMORY;
+
+ PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
+ if (pMdl)
+ {
+ MmBuildMdlForNonPagedPool(pMdl);
+#ifdef RT_ARCH_AMD64
+ MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
+#endif
+
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
+ if (pMemNt)
+ {
+ pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
+ pMemNt->cMdls = 1;
+ pMemNt->apMdls[0] = pMdl;
+ *ppMem = &pMemNt->Core;
+ return VINF_SUCCESS;
+ }
+
+ IoFreeMdl(pMdl);
+ }
+ MmFreeContiguousMemory(pv);
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Allocates physically contiguous memory below 4GB with page alignment.
+ * Thin wrapper over the rtR0MemObjNativeAllocContEx worker above.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+ /*
+ * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
+ *
+ * This is preferable to using MmAllocateContiguousMemory because there are
+ * a few situations where the memory shouldn't be mapped, like for instance
+ * VT-x control memory. Since these are rather small allocations (one or
+ * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
+ * request.
+ *
+ * If the allocation is big, the chances are *probably* not very good. The
+ * current limit is kind of random...
+ */
+ if ( cb < _128K
+ && uAlignment == PAGE_SIZE
+ && g_pfnrtMmAllocatePagesForMdl
+ && g_pfnrtMmFreePagesFromMdl)
+ {
+ PHYSICAL_ADDRESS Zero;
+ Zero.QuadPart = 0;
+ PHYSICAL_ADDRESS HighAddr;
+ HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
+ PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
+ if (pMdl)
+ {
+ if (MmGetMdlByteCount(pMdl) >= cb)
+ {
+ PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
+ PFN_NUMBER Pfn = paPfns[0] + 1;
+ const size_t cPages = cb >> PAGE_SHIFT;
+ size_t iPage;
+ for (iPage = 1; iPage < cPages; iPage++, Pfn++)
+ if (paPfns[iPage] != Pfn)
+ break;
+ if (iPage >= cPages)
+ {
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+ if (pMemNt)
+ {
+ pMemNt->Core.u.Phys.fAllocated = true;
+ pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
+ pMemNt->fAllocatedPagesForMdl = true;
+ pMemNt->cMdls = 1;
+ pMemNt->apMdls[0] = pMdl;
+ *ppMem = &pMemNt->Core;
+ return VINF_SUCCESS;
+ }
+ }
+ }
+ g_pfnrtMmFreePagesFromMdl(pMdl);
+ ExFreePool(pMdl);
+ }
+ }
+
+ return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+ if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
+ {
+ PHYSICAL_ADDRESS Zero;
+ Zero.QuadPart = 0;
+ PHYSICAL_ADDRESS HighAddr;
+ HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
+ PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
+ if (pMdl)
+ {
+ if (MmGetMdlByteCount(pMdl) >= cb)
+ {
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
+ if (pMemNt)
+ {
+ pMemNt->fAllocatedPagesForMdl = true;
+ pMemNt->cMdls = 1;
+ pMemNt->apMdls[0] = pMdl;
+ *ppMem = &pMemNt->Core;
+ return VINF_SUCCESS;
+ }
+ }
+ g_pfnrtMmFreePagesFromMdl(pMdl);
+ ExFreePool(pMdl);
+ }
+ return VERR_NO_MEMORY;
+ }
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Creates a memory object for a pre-existing physical address range without
+ * allocating or mapping anything (typically MMIO regions).
+ *
+ * @returns IPRT status code.
+ * @param   ppMem           Where to store the new memory object pointer.
+ * @param   Phys            First physical address; must be page aligned and
+ *                          representable as a PFN_NUMBER (both enforced by the
+ *                          round-trip check below).
+ * @param   cb              Size of the range in bytes.
+ * @param   uCachePolicy    RTMEM_CACHE_POLICY_DONT_CARE or
+ *                          RTMEM_CACHE_POLICY_MMIO; anything else is rejected.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
+
+    /*
+     * Validate the address range and create a descriptor for it.
+     */
+    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
+    /* Shifting down and back up fails both for addresses wider than PFN_NUMBER
+       and for addresses that aren't page aligned. */
+    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
+        return VERR_ADDRESS_TOO_BIG;
+
+    /*
+     * Create the IPRT memory object.  No MDL is attached (cMdls stays 0);
+     * rtR0MemObjNativeFree has nothing to undo for this variant.
+     */
+    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (pMemNt)
+    {
+        pMemNt->Core.u.Phys.PhysBase = Phys;
+        pMemNt->Core.u.Phys.fAllocated = false;
+        pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
+        *ppMem = &pMemNt->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Internal worker for locking down pages.
+ *
+ * @return IPRT status code.
+ *
+ * @param ppMem Where to store the memory object pointer.
+ * @param pv First page.
+ * @param cb Number of bytes.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
+ * @param R0Process The process \a pv and \a cb refers to.
+ */
+static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+    /*
+     * Calc the number of MDLs we need and allocate the memory object structure.
+     */
+    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
+    if (cb % MAX_LOCK_MEM_SIZE)
+        cMdls++;
+    if (cMdls >= UINT32_MAX)
+        return VERR_OUT_OF_RANGE;
+    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
+                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
+    if (!pMemNt)
+        return VERR_NO_MEMORY;
+
+    /*
+     * Loop locking down the sub parts of the memory, one MDL per chunk of at
+     * most MAX_LOCK_MEM_SIZE bytes.  pMemNt->cMdls is only incremented once an
+     * MDL is both locked and stored, so on failure it reflects exactly what
+     * the cleanup code at the bottom has to undo.
+     */
+    int rc = VINF_SUCCESS;
+    size_t cbTotal = 0;
+    uint8_t *pb = (uint8_t *)pv;
+    uint32_t iMdl;
+    for (iMdl = 0; iMdl < cMdls; iMdl++)
+    {
+        /*
+         * Calc the Mdl size and allocate it.
+         */
+        size_t cbCur = cb - cbTotal;
+        if (cbCur > MAX_LOCK_MEM_SIZE)
+            cbCur = MAX_LOCK_MEM_SIZE;
+        AssertMsg(cbCur, ("cbCur: 0!\n"));
+        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
+        if (!pMdl)
+        {
+            rc = VERR_NO_MEMORY;
+            break;
+        }
+
+        /*
+         * Lock the pages.  MmProbeAndLockPages raises an SEH exception rather
+         * than returning a status on failure, hence the __try block.
+         */
+        __try
+        {
+            MmProbeAndLockPages(pMdl,
+                                R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
+                                fAccess == RTMEM_PROT_READ
+                                ? IoReadAccess
+                                : fAccess == RTMEM_PROT_WRITE
+                                ? IoWriteAccess
+                                : IoModifyAccess);
+
+            pMemNt->apMdls[iMdl] = pMdl;
+            pMemNt->cMdls++;
+        }
+        __except(EXCEPTION_EXECUTE_HANDLER)
+        {
+            IoFreeMdl(pMdl);
+            rc = VERR_LOCK_FAILED;
+            break;
+        }
+
+        /* Secure the whole range exactly once.  (The old code re-secured it on
+           every iteration, overwriting pvSecureMem and thus leaking the
+           previously returned secure handle for multi-MDL objects.) */
+        if (   pMemNt->pvSecureMem == NULL
+            && R0Process != NIL_RTR0PROCESS
+            && g_pfnrtMmSecureVirtualMemory
+            && g_pfnrtMmUnsecureVirtualMemory)
+        {
+            /* Make sure the user process can't change the allocation. */
+            pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
+                                                               fAccess & RTMEM_PROT_WRITE
+                                                               ? PAGE_READWRITE
+                                                               : PAGE_READONLY);
+            if (!pMemNt->pvSecureMem)
+            {
+                rc = VERR_NO_MEMORY;
+                break;
+            }
+        }
+
+        /* next */
+        cbTotal += cbCur;
+        pb += cbCur;
+    }
+    if (RT_SUCCESS(rc))
+    {
+        Assert(pMemNt->cMdls == cMdls);
+        pMemNt->Core.u.Lock.R0Process = R0Process;
+        *ppMem = &pMemNt->Core;
+        return rc;
+    }
+
+    /*
+     * We failed, perform cleanups.  Undo exactly the cMdls entries that were
+     * locked and stored above.  (The old code counted down from the loop
+     * index instead, which skipped the just-locked MDL when the secure-memory
+     * step failed, leaking locked pages.)
+     */
+    uint32_t iCleanup = pMemNt->cMdls;
+    while (iCleanup-- > 0)
+    {
+        MmUnlockPages(pMemNt->apMdls[iCleanup]);
+        IoFreeMdl(pMemNt->apMdls[iCleanup]);
+        pMemNt->apMdls[iCleanup] = NULL;
+    }
+    pMemNt->cMdls = 0;
+    if (pMemNt->pvSecureMem)
+    {
+        if (g_pfnrtMmUnsecureVirtualMemory)
+            g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
+        pMemNt->pvSecureMem = NULL;
+    }
+
+    rtR0MemObjDelete(&pMemNt->Core);
+    return rc;
+}
+
+
+/**
+ * Locks down user-mode memory of the calling process.
+ *
+ * @returns IPRT status code; VERR_NOT_SUPPORTED unless R0Process is the
+ *          current process (the only case the NT worker handles today).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
+                                         RTR0PROCESS R0Process)
+{
+    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
+    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+ return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+ /*
+ * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
+ */
+ RT_NOREF4(ppMem, pvFixed, cb, uAlignment);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Reserving user virtual address space is not implemented on NT.
+ * @returns VERR_NOT_SUPPORTED always.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
+                                            RTR0PROCESS R0Process)
+{
+    /*
+     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
+     */
+    RT_NOREF5(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the memory object for the mapping.
+ * @param pMemToMap The memory object to map.
+ * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
+ * @param uAlignment The alignment requirement for the mapping.
+ * @param fProt The desired page protection for the mapping.
+ * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
+ * If not nil, it's the current process.
+ */
+static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+ unsigned fProt, RTR0PROCESS R0Process)
+{
+ int rc = VERR_MAP_FAILED;
+
+ /*
+ * Check that the specified alignment is supported.
+ */
+ if (uAlignment > PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * There are two basic cases here, either we've got an MDL and can
+ * map it using MmMapLockedPages, or we've got a contiguous physical
+ * range (MMIO most likely) and can use MmMapIoSpace.
+ */
+ PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
+ if (pMemNtToMap->cMdls)
+ {
+ /* don't attempt map locked regions with more than one mdl. */
+ if (pMemNtToMap->cMdls != 1)
+ return VERR_NOT_SUPPORTED;
+
+ /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
+ if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
+ return VERR_NOT_SUPPORTED;
+
+ /* we can't map anything to the first page, sorry. */
+ if (pvFixed == 0)
+ return VERR_NOT_SUPPORTED;
+
+ /* only one system mapping for now - no time to figure out MDL restrictions right now. */
+ if ( pMemNtToMap->Core.uRel.Parent.cMappings
+ && R0Process == NIL_RTR0PROCESS)
+ {
+ if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
+ return VERR_NOT_SUPPORTED;
+ uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
+ while (iMapping-- > 0)
+ {
+ PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
+ if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
+ || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
+ return VERR_NOT_SUPPORTED;
+ }
+ }
+
+ __try
+ {
+ /** @todo uAlignment */
+ /** @todo How to set the protection on the pages? */
+ void *pv;
+ if (g_pfnrtMmMapLockedPagesSpecifyCache)
+ pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
+ R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
+ MmCached,
+ pvFixed != (void *)-1 ? pvFixed : NULL,
+ FALSE /* no bug check on failure */,
+ NormalPagePriority);
+ else
+ pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
+ R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
+ if (pv)
+ {
+ NOREF(fProt);
+
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
+ pMemNtToMap->Core.cb);
+ if (pMemNt)
+ {
+ pMemNt->Core.u.Mapping.R0Process = R0Process;
+ *ppMem = &pMemNt->Core;
+ return VINF_SUCCESS;
+ }
+
+ rc = VERR_NO_MEMORY;
+ MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
+ }
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+#ifdef LOG_ENABLED
+ NTSTATUS rcNt = GetExceptionCode();
+ Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
+#endif
+
+ /* nothing */
+ rc = VERR_MAP_FAILED;
+ }
+
+ }
+ else
+ {
+ AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
+ && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
+
+ /* cannot map phys mem to user space (yet). */
+ if (R0Process != NIL_RTR0PROCESS)
+ return VERR_NOT_SUPPORTED;
+
+ /** @todo uAlignment */
+ /** @todo How to set the protection on the pages? */
+ PHYSICAL_ADDRESS Phys;
+ Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
+ void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
+ pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
+ if (pv)
+ {
+ PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
+ pMemNtToMap->Core.cb);
+ if (pMemNt)
+ {
+ pMemNt->Core.u.Mapping.R0Process = R0Process;
+ *ppMem = &pMemNt->Core;
+ return VINF_SUCCESS;
+ }
+
+ rc = VERR_NO_MEMORY;
+ MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
+ }
+ }
+
+ NOREF(uAlignment); NOREF(fProt);
+ return rc;
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+ unsigned fProt, size_t offSub, size_t cbSub)
+{
+ AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
+ return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
+}
+
+
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed,
+ size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
+{
+ AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
+ return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
+}
+
+
+/**
+ * Changes the page-level protection of (part of) a memory object.
+ *
+ * @returns IPRT status code; currently always VERR_NOT_SUPPORTED because the
+ *          MmProtectMdlSystemAddress based implementation below is compiled
+ *          out (see the comment about verifier testing).
+ * @param   pMem    The memory object to operate on.
+ * @param   offSub  Byte offset of the first page to change.
+ * @param   cbSub   Number of bytes to change.
+ * @param   fProt   New protection; a combination of RTMEM_PROT_XXX flags.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+#if 0
+    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
+#endif
+
+    /*
+     * Seems there are some issues with this MmProtectMdlSystemAddress API, so
+     * this code isn't currently enabled until we've tested it with the verifier.
+     */
+#if 0
+    /*
+     * The API we've got requires a kernel mapping.
+     */
+    if (   pMemNt->cMdls
+        && g_pfnrtMmProtectMdlSystemAddress
+        && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
+        && pMemNt->Core.pv != NULL
+        && (   pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
+            || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
+            || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
+            || (   pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
+                && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)      /* fixed: was NIL_RTPROCESS (wrong nil for an RTR0PROCESS) */
+            || (   pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
+                && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) ) /* fixed: ditto */
+    {
+        /* Convert the protection. */
+        LOCK_OPERATION enmLockOp;
+        ULONG fAccess;
+        switch (fProt)
+        {
+            case RTMEM_PROT_NONE:
+                fAccess = PAGE_NOACCESS;
+                enmLockOp = IoReadAccess;
+                break;
+            case RTMEM_PROT_READ:
+                fAccess = PAGE_READONLY;
+                enmLockOp = IoReadAccess;
+                break;
+            case RTMEM_PROT_WRITE:
+            case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
+                fAccess = PAGE_READWRITE;
+                enmLockOp = IoModifyAccess;
+                break;
+            case RTMEM_PROT_EXEC:
+                fAccess = PAGE_EXECUTE;
+                enmLockOp = IoReadAccess;
+                break;
+            case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
+                fAccess = PAGE_EXECUTE_READ;
+                enmLockOp = IoReadAccess;
+                break;
+            case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
+            case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
+                fAccess = PAGE_EXECUTE_READWRITE;
+                enmLockOp = IoModifyAccess;
+                break;
+            default:
+                AssertFailedReturn(VERR_INVALID_FLAGS);
+        }
+
+        NTSTATUS rcNt = STATUS_SUCCESS;
+# if 0 /** @todo test this against the verifier. */
+        if (offSub == 0 && pMemNt->Core.cb == cbSub)
+        {
+            uint32_t iMdl = pMemNt->cMdls;
+            while (iMdl-- > 0)
+            {
+                rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess); /* fixed: was apMdls[i] - undeclared identifier */
+                if (!NT_SUCCESS(rcNt))
+                    break;
+            }
+        }
+        else
+# endif
+        {
+            /*
+             * We ASSUME the following here:
+             *   - MmProtectMdlSystemAddress can deal with nonpaged pool memory
+             *   - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
+             *   - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
+             *     exact same ranges prior to freeing them.
+             *
+             * So, we lock the pages temporarily, call the API and unlock them.
+             */
+            uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
+            while (cbSub > 0 && NT_SUCCESS(rcNt))
+            {
+                size_t cbCur = cbSub;
+                if (cbCur > MAX_LOCK_MEM_SIZE)
+                    cbCur = MAX_LOCK_MEM_SIZE;
+                PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
+                if (pMdl)
+                {
+                    __try
+                    {
+                        MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
+                    }
+                    __except(EXCEPTION_EXECUTE_HANDLER)
+                    {
+                        rcNt = GetExceptionCode();
+                    }
+                    if (NT_SUCCESS(rcNt))
+                    {
+                        rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
+                        MmUnlockPages(pMdl);
+                    }
+                    IoFreeMdl(pMdl);
+                }
+                else
+                    rcNt = STATUS_NO_MEMORY;
+                pbCur += cbCur;
+                cbSub -= cbCur;
+            }
+        }
+
+        if (NT_SUCCESS(rcNt))
+            return VINF_SUCCESS;
+        return RTErrConvertFromNtStatus(rcNt);
+    }
+#else
+    RT_NOREF4(pMem, offSub, cbSub, fProt);
+#endif
+
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Gets the host-physical address of a page within the memory object.
+ *
+ * @returns Physical address of the page, or NIL_RTHCPHYS when it cannot be
+ *          determined (reserved virtual ranges and unexpected object types).
+ * @param   pMem    The memory object.
+ * @param   iPage   Zero-based page index within the object.
+ */
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
+
+    if (pMemNt->cMdls)
+    {
+        /* MDL-backed objects: read the PFN straight out of the MDL PFN array. */
+        if (pMemNt->cMdls == 1)
+        {
+            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
+            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
+        }
+
+        /* Multi-MDL objects (large locked ranges) were split into
+           MAX_LOCK_MEM_SIZE chunks by rtR0MemObjNtLock; find the right MDL first. */
+        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
+        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
+        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
+        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
+    }
+
+    /* No MDLs: resolve by object type. */
+    switch (pMemNt->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_MAPPING:
+            /* Delegate to the parent object that owns the backing pages. */
+            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
+
+        case RTR0MEMOBJTYPE_PHYS:
+            /* Contiguous physical range entered via rtR0MemObjNativeEnterPhys. */
+            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
+
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+        case RTR0MEMOBJTYPE_LOCK:
+        default:
+            /* These types always carry MDLs and are handled above; reaching
+               here means the object is inconsistent. */
+            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
+            /* deliberate fallthrough to the NIL return */
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return NIL_RTHCPHYS;
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/memuserkernel-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/memuserkernel-r0drv-nt.cpp
new file mode 100644
index 00000000..2d1c2c62
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/memuserkernel-r0drv-nt.cpp
@@ -0,0 +1,123 @@
+/* $Id: memuserkernel-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/mem.h>
+#include <iprt/errcore.h>
+
+#include "internal-r0drv-nt.h"
+
+
+/**
+ * Copies memory from a user-mode address into a kernel buffer.
+ *
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED if probing or copying faulted.
+ * @param   pvDst       Kernel destination buffer.
+ * @param   R3PtrSrc    User-mode source address.
+ * @param   cb          Number of bytes to copy.
+ */
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+    __try
+    {
+        /* ProbeForRead raises an SEH exception if the range isn't valid user memory. */
+        ProbeForRead((PVOID)R3PtrSrc, cb, 1);
+        memcpy(pvDst, (void const *)R3PtrSrc, cb);
+    }
+    __except(EXCEPTION_EXECUTE_HANDLER)
+    {
+        return VERR_ACCESS_DENIED;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Copies memory from a kernel buffer to a user-mode address.
+ *
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED if probing or copying faulted.
+ * @param   R3PtrDst    User-mode destination address.
+ * @param   pvSrc       Kernel source buffer.
+ * @param   cb          Number of bytes to copy.
+ */
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+    __try
+    {
+        /* ProbeForWrite raises an SEH exception if the range isn't writable user memory. */
+        ProbeForWrite((PVOID)R3PtrDst, cb, 1);
+        memcpy((void *)R3PtrDst, pvSrc, cb);
+    }
+    __except(EXCEPTION_EXECUTE_HANDLER)
+    {
+        return VERR_ACCESS_DENIED;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks whether the given ring-3 pointer lies in the user address range.
+ *
+ * @returns true if at or below the highest user address, false otherwise.
+ * @param   R3Ptr   The user-mode address to check.
+ */
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
+#ifdef IPRT_TARGET_NT4
+    /* NOTE(review): on the NT4 target the boundary is resolved at runtime via
+       g_puRtMmHighestUserAddress, with the lower half of the address space as
+       a conservative fallback - presumably MM_HIGHEST_USER_ADDRESS cannot be
+       used directly there; confirm against the NT4 init code. */
+    uintptr_t const uLast = g_puRtMmHighestUserAddress ? *g_puRtMmHighestUserAddress : ~(uintptr_t)0 / 2;
+#else
+    uintptr_t const uLast = (uintptr_t)MM_HIGHEST_USER_ADDRESS;
+#endif
+    return R3Ptr <= uLast;
+}
+
+
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+#ifdef IPRT_TARGET_NT4
+ uintptr_t const uFirst = g_puRtMmSystemRangeStart ? *g_puRtMmSystemRangeStart : ~(uintptr_t)0 / 2 + 1;
+#else
+ uintptr_t const uFirst = (uintptr_t)MM_SYSTEM_RANGE_START;
+#endif
+ return (uintptr_t)pv >= uFirst;
+}
+
+
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+ return true;
+}
+
+
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+ __try
+ {
+ memcpy(pvDst, pvSrc, cb);
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ return VERR_ACCESS_DENIED;
+ }
+ return VINF_SUCCESS;
+}
+
+
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+ __try
+ {
+ memcpy(pvDst, pvSrc, cb);
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ return VERR_ACCESS_DENIED;
+ }
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp
new file mode 100644
index 00000000..b541b031
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp
@@ -0,0 +1,1952 @@
+/* $Id: mp-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/time.h>
+#include "r0drv/mp-r0drv.h"
+#include "symdb.h"
+#include "internal-r0drv-nt.h"
+#include "internal/mp.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+typedef enum
+{
+ RT_NT_CPUID_SPECIFIC,
+ RT_NT_CPUID_PAIR,
+ RT_NT_CPUID_OTHERS,
+ RT_NT_CPUID_ALL
+} RT_NT_CPUID;
+
+
+/**
+ * Used by the RTMpOnSpecific.
+ */
+typedef struct RTMPNTONSPECIFICARGS
+{
+ /** Set if we're executing. */
+ bool volatile fExecuting;
+ /** Set when done executing. */
+ bool volatile fDone;
+ /** Number of references to this heap block. */
+ uint32_t volatile cRefs;
+ /** Event that the calling thread is waiting on. */
+ KEVENT DoneEvt;
+ /** The deferred procedure call object. */
+ KDPC Dpc;
+ /** The callback argument package. */
+ RTMPARGS CallbackArgs;
+} RTMPNTONSPECIFICARGS;
+/** Pointer to an argument/state structure for RTMpOnSpecific on NT. */
+typedef RTMPNTONSPECIFICARGS *PRTMPNTONSPECIFICARGS;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Inactive bit for g_aidRtMpNtByCpuSetIdx. */
+#define RTMPNT_ID_F_INACTIVE RT_BIT_32(31)
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Maximum number of processor groups. */
+uint32_t g_cRtMpNtMaxGroups;
+/** Maximum number of processors. */
+uint32_t g_cRtMpNtMaxCpus;
+/** Number of active processors. */
+uint32_t volatile g_cRtMpNtActiveCpus;
+/** The NT CPU set.
+ * KeQueryActiveProcessors() cannot be called at all IRQLs and therefore we'll
+ * have to cache it. Fortunately, NT doesn't really support taking CPUs offline,
+ * and taking them online was introduced with W2K8 where it is intended for virtual
+ * machines and not real HW. We update this, g_cRtMpNtActiveCpus and
+ * g_aidRtMpNtByCpuSetIdx from the rtR0NtMpProcessorChangeCallback.
+ */
+RTCPUSET g_rtMpNtCpuSet;
+
+/** Static per group info.
+ * @remarks With RTCPUSET_MAX_CPUS as 256, this takes up 33KB. */
+static struct
+{
+ /** The max CPUs in the group. */
+ uint16_t cMaxCpus;
+ /** The number of active CPUs at the time of initialization. */
+ uint16_t cActiveCpus;
+ /** CPU set indexes for each CPU in the group. */
+ int16_t aidxCpuSetMembers[64];
+} g_aRtMpNtCpuGroups[RTCPUSET_MAX_CPUS];
+/** Maps CPU set indexes to RTCPUID.
+ * Inactive CPUs have bit 31 set (RTMPNT_ID_F_INACTIVE) so we can identify them
+ * and shuffle duplicates during CPU hotplugging. We assign temporary IDs to
+ * the inactive CPUs starting at g_cRtMpNtMaxCpus - 1, ASSUMING that active
+ * CPUs have IDs from 0 to g_cRtMpNtActiveCpus. */
+RTCPUID g_aidRtMpNtByCpuSetIdx[RTCPUSET_MAX_CPUS];
+/** The handle of the rtR0NtMpProcessorChangeCallback registration. */
+static PVOID g_pvMpCpuChangeCallback = NULL;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
+ PNTSTATUS prcOperationStatus);
+static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo);
+
+
+
+/**
+ * Initializes multiprocessor globals (called by rtR0InitNative).
+ *
+ * @returns IPRT status code.
+ * @param pOsVerInfo Version information.
+ */
+DECLHIDDEN(int) rtR0MpNtInit(RTNTSDBOSVER const *pOsVerInfo)
+{
+#define MY_CHECK_BREAK(a_Check, a_DbgPrintArgs) \
+ AssertMsgBreakStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
+#define MY_CHECK_RETURN(a_Check, a_DbgPrintArgs, a_rcRet) \
+ AssertMsgReturnStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs, a_rcRet)
+#define MY_CHECK(a_Check, a_DbgPrintArgs) \
+ AssertMsgStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
+
+ /*
+ * API combination checks.
+ */
+ MY_CHECK_RETURN(!g_pfnrtKeSetTargetProcessorDpcEx || g_pfnrtKeGetProcessorNumberFromIndex,
+ ("IPRT: Fatal: Missing KeSetTargetProcessorDpcEx without KeGetProcessorNumberFromIndex!\n"),
+ VERR_SYMBOL_NOT_FOUND);
+
+ /*
+ * Get max number of processor groups.
+ *
+ * We may need to adjust this number upwards below, because windows likes to keep
+ * all options open when it comes to hotplugged CPU group assignments. A
+ * server advertising up to 64 CPUs in the ACPI table will get a result of
+ * 64 from KeQueryMaximumGroupCount. That makes sense. However, when windows
+ * server 2012 does a two processor group setup for it, the sum of the
+ * GroupInfo[*].MaximumProcessorCount members below is 128. This is probably
+ * because windows doesn't want to make decisions regarding the grouping of hotpluggable CPUs.
+ * So, we need to bump the maximum count to 128 below to deal with this as we
+ * want to have valid CPU set indexes for all potential CPUs - how could we
+ * otherwise use the RTMpGetSet() result and also RTCpuSetCount(RTMpGetSet())
+ * should equal RTMpGetCount().
+ */
+ if (g_pfnrtKeQueryMaximumGroupCount)
+ {
+ g_cRtMpNtMaxGroups = g_pfnrtKeQueryMaximumGroupCount();
+ MY_CHECK_RETURN(g_cRtMpNtMaxGroups <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxGroups > 0,
+ ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u\n", g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
+ VERR_MP_TOO_MANY_CPUS);
+ }
+ else
+ g_cRtMpNtMaxGroups = 1;
+
+ /*
+ * Get max number of CPUs.
+ * This also defines the range of NT CPU indexes, RTCPUID and index into RTCPUSET.
+ */
+ if (g_pfnrtKeQueryMaximumProcessorCountEx)
+ {
+ g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS);
+ MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
+ ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u [KeQueryMaximumProcessorCountEx]\n",
+ g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
+ VERR_MP_TOO_MANY_CPUS);
+ }
+ else if (g_pfnrtKeQueryMaximumProcessorCount)
+ {
+ g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCount();
+ MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
+ ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u [KeQueryMaximumProcessorCount]\n",
+ g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
+ VERR_MP_TOO_MANY_CPUS);
+ }
+ else if (g_pfnrtKeQueryActiveProcessors)
+ {
+ KAFFINITY fActiveProcessors = g_pfnrtKeQueryActiveProcessors();
+ MY_CHECK_RETURN(fActiveProcessors != 0,
+ ("IPRT: Fatal: KeQueryActiveProcessors returned 0!\n"),
+ VERR_INTERNAL_ERROR_2);
+ g_cRtMpNtMaxCpus = 0;
+ do
+ {
+ g_cRtMpNtMaxCpus++;
+ fActiveProcessors >>= 1;
+ } while (fActiveProcessors);
+ }
+ else
+ g_cRtMpNtMaxCpus = KeNumberProcessors;
+
+ /*
+ * Just because we're a bit paranoid about getting something wrong wrt the
+ * kernel interfaces, we try 16 times to get the KeQueryActiveProcessorCountEx
+ * and KeQueryLogicalProcessorRelationship information to match up.
+ */
+ for (unsigned cTries = 0;; cTries++)
+ {
+ /*
+ * Get number of active CPUs.
+ */
+ if (g_pfnrtKeQueryActiveProcessorCountEx)
+ {
+ g_cRtMpNtActiveCpus = g_pfnrtKeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
+ MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus && g_cRtMpNtActiveCpus > 0,
+ ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryActiveProcessorCountEx]\n",
+ g_cRtMpNtMaxGroups, g_cRtMpNtMaxCpus),
+ VERR_MP_TOO_MANY_CPUS);
+ }
+ else if (g_pfnrtKeQueryActiveProcessorCount)
+ {
+ g_cRtMpNtActiveCpus = g_pfnrtKeQueryActiveProcessorCount(NULL);
+ MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus && g_cRtMpNtActiveCpus > 0,
+ ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryActiveProcessorCount]\n",
+ g_cRtMpNtMaxGroups, g_cRtMpNtMaxCpus),
+ VERR_MP_TOO_MANY_CPUS);
+ }
+ else
+ g_cRtMpNtActiveCpus = g_cRtMpNtMaxCpus;
+
+ /*
+ * Query the details for the groups to figure out which CPUs are online as
+ * well as the NT index limit.
+ */
+ for (unsigned i = 0; i < RT_ELEMENTS(g_aidRtMpNtByCpuSetIdx); i++)
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ g_aidRtMpNtByCpuSetIdx[i] = NIL_RTCPUID;
+#else
+ g_aidRtMpNtByCpuSetIdx[i] = i < g_cRtMpNtMaxCpus ? i : NIL_RTCPUID;
+#endif
+ for (unsigned idxGroup = 0; idxGroup < RT_ELEMENTS(g_aRtMpNtCpuGroups); idxGroup++)
+ {
+ g_aRtMpNtCpuGroups[idxGroup].cMaxCpus = 0;
+ g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = 0;
+ for (unsigned idxMember = 0; idxMember < RT_ELEMENTS(g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers); idxMember++)
+ g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1;
+ }
+
+ if (g_pfnrtKeQueryLogicalProcessorRelationship)
+ {
+ MY_CHECK_RETURN(g_pfnrtKeGetProcessorIndexFromNumber,
+ ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
+ VERR_SYMBOL_NOT_FOUND);
+ MY_CHECK_RETURN(g_pfnrtKeGetProcessorNumberFromIndex,
+ ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
+ VERR_SYMBOL_NOT_FOUND);
+ MY_CHECK_RETURN(g_pfnrtKeSetTargetProcessorDpcEx,
+ ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeSetTargetProcessorDpcEx!\n"),
+ VERR_SYMBOL_NOT_FOUND);
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = NULL;
+ int rc = rtR0NtInitQueryGroupRelations(&pInfo);
+ if (RT_FAILURE(rc))
+ return rc;
+
+ MY_CHECK(pInfo->Group.MaximumGroupCount == g_cRtMpNtMaxGroups,
+ ("IPRT: Fatal: MaximumGroupCount=%u != g_cRtMpNtMaxGroups=%u!\n",
+ pInfo->Group.MaximumGroupCount, g_cRtMpNtMaxGroups));
+ MY_CHECK(pInfo->Group.ActiveGroupCount > 0 && pInfo->Group.ActiveGroupCount <= g_cRtMpNtMaxGroups,
+ ("IPRT: Fatal: ActiveGroupCount=%u != g_cRtMpNtMaxGroups=%u!\n",
+ pInfo->Group.ActiveGroupCount, g_cRtMpNtMaxGroups));
+
+ /*
+ * First we need to recalc g_cRtMpNtMaxCpus (see above).
+ */
+ uint32_t cMaxCpus = 0;
+ uint32_t idxGroup;
+ for (idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
+ {
+ const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
+ MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
+ ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
+ MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
+ ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
+ pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
+ cMaxCpus += pGrpInfo->MaximumProcessorCount;
+ }
+ if (cMaxCpus > g_cRtMpNtMaxCpus && RT_SUCCESS(rc))
+ {
+ DbgPrint("IPRT: g_cRtMpNtMaxCpus=%u -> %u\n", g_cRtMpNtMaxCpus, cMaxCpus);
+#ifndef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ uint32_t i = RT_MIN(cMaxCpus, RT_ELEMENTS(g_aidRtMpNtByCpuSetIdx));
+ while (i-- > g_cRtMpNtMaxCpus)
+ g_aidRtMpNtByCpuSetIdx[i] = i;
+#endif
+ g_cRtMpNtMaxCpus = cMaxCpus;
+ if (g_cRtMpNtMaxGroups > RTCPUSET_MAX_CPUS)
+ {
+ MY_CHECK(g_cRtMpNtMaxGroups <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxGroups > 0,
+ ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u\n", g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS));
+ rc = VERR_MP_TOO_MANY_CPUS;
+ }
+ }
+
+ /*
+ * Calc online mask, partition IDs and such.
+ *
+ * Also check ASSUMPTIONS:
+ *
+ * 1. Processor indexes going from 0 and up to
+ * KeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS) - 1.
+ *
+ * 2. Currently valid processor indexes, i.e. accepted by
+ * KeGetProcessorIndexFromNumber & KeGetProcessorNumberFromIndex, goes
+ * from 0 thru KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS) - 1.
+ *
+ * 3. PROCESSOR_GROUP_INFO::MaximumProcessorCount gives the number of
+ * relevant bits in the ActiveProcessorMask (from LSB).
+ *
+ * 4. Active processor count found in KeQueryLogicalProcessorRelationship
+ * output matches what KeQueryActiveProcessorCountEx(ALL) returns.
+ *
+ * 5. Active + inactive processor counts in same does not exceed
+ * KeQueryMaximumProcessorCountEx(ALL).
+ *
+ * Note! Processor indexes are assigned as CPUs come online and are not
+ * preallocated according to group maximums. Since CPUs are only taken
+ * online and never offlined, this means that internal CPU bitmaps are
+ * never sparse and no time is wasted scanning unused bits.
+ *
+ * Unfortunately, it means that ring-3 cannot easily guess the index
+ * assignments when hotswapping is used, and must use GIP when available.
+ */
+ RTCpuSetEmpty(&g_rtMpNtCpuSet);
+ uint32_t cInactive = 0;
+ uint32_t cActive = 0;
+ uint32_t idxCpuMax = 0;
+ uint32_t idxCpuSetNextInactive = g_cRtMpNtMaxCpus - 1;
+ for (idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
+ {
+ const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
+ MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
+ ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
+ MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
+ ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
+ pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
+
+ g_aRtMpNtCpuGroups[idxGroup].cMaxCpus = pGrpInfo->MaximumProcessorCount;
+ g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = pGrpInfo->ActiveProcessorCount;
+
+ for (uint32_t idxMember = 0; idxMember < pGrpInfo->MaximumProcessorCount; idxMember++)
+ {
+ PROCESSOR_NUMBER ProcNum;
+ ProcNum.Group = (USHORT)idxGroup;
+ ProcNum.Number = (UCHAR)idxMember;
+ ProcNum.Reserved = 0;
+ ULONG idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
+ if (idxCpu != INVALID_PROCESSOR_INDEX)
+ {
+ MY_CHECK_BREAK(idxCpu < g_cRtMpNtMaxCpus && idxCpu < RTCPUSET_MAX_CPUS, /* ASSUMPTION #1 */
+ ("IPRT: Fatal: idxCpu=%u >= g_cRtMpNtMaxCpus=%u (RTCPUSET_MAX_CPUS=%u)\n",
+ idxCpu, g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS));
+ if (idxCpu > idxCpuMax)
+ idxCpuMax = idxCpu;
+ g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpu;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ g_aidRtMpNtByCpuSetIdx[idxCpu] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember);
+#endif
+
+ ProcNum.Group = UINT16_MAX;
+ ProcNum.Number = UINT8_MAX;
+ ProcNum.Reserved = UINT8_MAX;
+ NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(idxCpu, &ProcNum);
+ MY_CHECK_BREAK(NT_SUCCESS(rcNt),
+ ("IPRT: Fatal: KeGetProcessorNumberFromIndex(%u,) -> %#x!\n", idxCpu, rcNt));
+ MY_CHECK_BREAK(ProcNum.Group == idxGroup && ProcNum.Number == idxMember,
+ ("IPRT: Fatal: KeGetProcessorXxxxFromYyyy roundtrip error for %#x! Group: %u vs %u, Number: %u vs %u\n",
+ idxCpu, ProcNum.Group, idxGroup, ProcNum.Number, idxMember));
+
+ if (pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember))
+ {
+ RTCpuSetAddByIndex(&g_rtMpNtCpuSet, idxCpu);
+ cActive++;
+ }
+ else
+ cInactive++; /* (This is a little unexpected, but not important as long as things add up below.) */
+ }
+ else
+ {
+ /* Must be not present / inactive when KeGetProcessorIndexFromNumber fails. */
+ MY_CHECK_BREAK(!(pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember)),
+ ("IPRT: Fatal: KeGetProcessorIndexFromNumber(%u/%u) failed but CPU is active! cMax=%u cActive=%u fActive=%p\n",
+ idxGroup, idxMember, pGrpInfo->MaximumProcessorCount, pGrpInfo->ActiveProcessorCount,
+ pGrpInfo->ActiveProcessorMask));
+ cInactive++;
+ if (idxCpuSetNextInactive >= g_cRtMpNtActiveCpus)
+ {
+ g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ g_aidRtMpNtByCpuSetIdx[idxCpuSetNextInactive] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember)
+ | RTMPNT_ID_F_INACTIVE;
+#endif
+ idxCpuSetNextInactive--;
+ }
+ }
+ }
+ }
+
+ MY_CHECK(cInactive + cActive <= g_cRtMpNtMaxCpus, /* ASSUMPTION #5 (not '==' because of inactive groups) */
+ ("IPRT: Fatal: cInactive=%u + cActive=%u > g_cRtMpNtMaxCpus=%u\n", cInactive, cActive, g_cRtMpNtMaxCpus));
+
+ /* Deal with inactive groups using KeQueryMaximumProcessorCountEx or as
+ best we can by stipulating maximum member counts
+ from the previous group. */
+ if ( RT_SUCCESS(rc)
+ && idxGroup < pInfo->Group.MaximumGroupCount)
+ {
+ uint16_t cInactiveLeft = g_cRtMpNtMaxCpus - (cInactive + cActive);
+ while (idxGroup < pInfo->Group.MaximumGroupCount)
+ {
+ uint32_t cMaxMembers = 0;
+ if (g_pfnrtKeQueryMaximumProcessorCountEx)
+ cMaxMembers = g_pfnrtKeQueryMaximumProcessorCountEx(idxGroup);
+ if (cMaxMembers != 0 || cInactiveLeft == 0)
+ AssertStmt(cMaxMembers <= cInactiveLeft, cMaxMembers = cInactiveLeft);
+ else
+ {
+ uint16_t cGroupsLeft = pInfo->Group.MaximumGroupCount - idxGroup;
+ cMaxMembers = pInfo->Group.GroupInfo[idxGroup - 1].MaximumProcessorCount;
+ while (cMaxMembers * cGroupsLeft < cInactiveLeft)
+ cMaxMembers++;
+ if (cMaxMembers > cInactiveLeft)
+ cMaxMembers = cInactiveLeft;
+ }
+
+ g_aRtMpNtCpuGroups[idxGroup].cMaxCpus = (uint16_t)cMaxMembers;
+ g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = 0;
+ for (uint16_t idxMember = 0; idxMember < cMaxMembers; idxMember++)
+ if (idxCpuSetNextInactive >= g_cRtMpNtActiveCpus)
+ {
+ g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ g_aidRtMpNtByCpuSetIdx[idxCpuSetNextInactive] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember)
+ | RTMPNT_ID_F_INACTIVE;
+#endif
+ idxCpuSetNextInactive--;
+ }
+ cInactiveLeft -= cMaxMembers;
+ idxGroup++;
+ }
+ }
+
+ /* We're done with pInfo now, free it so we can start returning when assertions fail. */
+ RTMemFree(pInfo);
+ if (RT_FAILURE(rc)) /* MY_CHECK_BREAK sets rc. */
+ return rc;
+ MY_CHECK_RETURN(cActive >= g_cRtMpNtActiveCpus,
+ ("IPRT: Fatal: cActive=%u < g_cRtMpNtActiveCpus=%u - CPUs removed?\n", cActive, g_cRtMpNtActiveCpus),
+ VERR_INTERNAL_ERROR_3);
+ MY_CHECK_RETURN(idxCpuMax < cActive, /* ASSUMPTION #2 */
+ ("IPRT: Fatal: idCpuMax=%u >= cActive=%u! Unexpected CPU index allocation. CPUs removed?\n",
+ idxCpuMax, cActive),
+ VERR_INTERNAL_ERROR_4);
+
+ /* Retry if CPUs were added. */
+ if ( cActive != g_cRtMpNtActiveCpus
+ && cTries < 16)
+ continue;
+ MY_CHECK_RETURN(cActive == g_cRtMpNtActiveCpus, /* ASSUMPTION #4 */
+ ("IPRT: Fatal: cActive=%u != g_cRtMpNtActiveCpus=%u\n", cActive, g_cRtMpNtActiveCpus),
+ VERR_INTERNAL_ERROR_5);
+ }
+ else
+ {
+ /* Legacy: */
+ MY_CHECK_RETURN(g_cRtMpNtMaxGroups == 1, ("IPRT: Fatal: Missing KeQueryLogicalProcessorRelationship!\n"),
+ VERR_SYMBOL_NOT_FOUND);
+
+ /** @todo Is it possible that the affinity mask returned by
+ * KeQueryActiveProcessors is sparse? */
+ if (g_pfnrtKeQueryActiveProcessors)
+ RTCpuSetFromU64(&g_rtMpNtCpuSet, g_pfnrtKeQueryActiveProcessors());
+ else if (g_cRtMpNtMaxCpus < 64)
+ RTCpuSetFromU64(&g_rtMpNtCpuSet, (UINT64_C(1) << g_cRtMpNtMaxCpus) - 1);
+ else
+ {
+ MY_CHECK_RETURN(g_cRtMpNtMaxCpus == 64, ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, expect 64 or less\n", g_cRtMpNtMaxCpus),
+ VERR_MP_TOO_MANY_CPUS);
+ RTCpuSetFromU64(&g_rtMpNtCpuSet, UINT64_MAX);
+ }
+
+ g_aRtMpNtCpuGroups[0].cMaxCpus = g_cRtMpNtMaxCpus;
+ g_aRtMpNtCpuGroups[0].cActiveCpus = g_cRtMpNtMaxCpus;
+ for (unsigned i = 0; i < g_cRtMpNtMaxCpus; i++)
+ {
+ g_aRtMpNtCpuGroups[0].aidxCpuSetMembers[i] = i;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ g_aidRtMpNtByCpuSetIdx[i] = RTMPCPUID_FROM_GROUP_AND_NUMBER(0, i);
+#endif
+ }
+ }
+
+ /*
+ * Register CPU hot plugging callback (it also counts active CPUs).
+ */
+ Assert(g_pvMpCpuChangeCallback == NULL);
+ if (g_pfnrtKeRegisterProcessorChangeCallback)
+ {
+ MY_CHECK_RETURN(g_pfnrtKeDeregisterProcessorChangeCallback,
+ ("IPRT: Fatal: KeRegisterProcessorChangeCallback without KeDeregisterProcessorChangeCallback!\n"),
+ VERR_SYMBOL_NOT_FOUND);
+
+ RTCPUSET const ActiveSetCopy = g_rtMpNtCpuSet;
+ RTCpuSetEmpty(&g_rtMpNtCpuSet);
+ uint32_t const cActiveCpus = g_cRtMpNtActiveCpus;
+ g_cRtMpNtActiveCpus = 0;
+
+ g_pvMpCpuChangeCallback = g_pfnrtKeRegisterProcessorChangeCallback(rtR0NtMpProcessorChangeCallback, NULL /*pvUser*/,
+ KE_PROCESSOR_CHANGE_ADD_EXISTING);
+ if (g_pvMpCpuChangeCallback)
+ {
+ if (cActiveCpus == g_cRtMpNtActiveCpus)
+ { /* likely */ }
+ else
+ {
+ g_pfnrtKeDeregisterProcessorChangeCallback(g_pvMpCpuChangeCallback);
+ if (cTries < 16)
+ {
+ /* Retry if CPUs were added. */
+ MY_CHECK_RETURN(g_cRtMpNtActiveCpus >= cActiveCpus,
+ ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u < cActiveCpus=%u! CPUs removed?\n",
+ g_cRtMpNtActiveCpus, cActiveCpus),
+ VERR_INTERNAL_ERROR_2);
+ MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus,
+ ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u > g_cRtMpNtMaxCpus=%u!\n",
+ g_cRtMpNtActiveCpus, g_cRtMpNtMaxCpus),
+ VERR_INTERNAL_ERROR_2);
+ continue;
+ }
+ MY_CHECK_RETURN(0, ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u cActiveCpus=%u\n", g_cRtMpNtActiveCpus, cActiveCpus),
+ VERR_INTERNAL_ERROR_3);
+ }
+ }
+ else
+ {
+ AssertFailed();
+ g_rtMpNtCpuSet = ActiveSetCopy;
+ g_cRtMpNtActiveCpus = cActiveCpus;
+ }
+ }
+ break;
+ } /* Retry loop for stable active CPU count. */
+
+#undef MY_CHECK_RETURN
+
+ /*
+ * Special IPI fun for RTMpPokeCpu.
+ *
+ * On Vista and later the DPC method doesn't seem to reliably send IPIs,
+ * so we have to use alternative methods.
+ *
+ * On AMD64 We used to use the HalSendSoftwareInterrupt API (also x86 on
+ * W10+), it looks faster and more convenient to use, however we're either
+ * using it wrong or it doesn't reliably do what we want (see @bugref{8343}).
+ *
+ * The HalRequestIpi API is thus far the only alternative to KeInsertQueueDpc
+ * for doing targeted IPIs. Trouble with this API is that it changed
+ * fundamentally in Windows 7 when they added support for lots of processors.
+ *
+ * If we really think we cannot use KeInsertQueueDpc, we use the broadcast IPI
+ * API KeIpiGenericCall.
+ */
+ if ( pOsVerInfo->uMajorVer > 6
+ || (pOsVerInfo->uMajorVer == 6 && pOsVerInfo->uMinorVer > 0))
+ g_pfnrtHalRequestIpiPreW7 = NULL;
+ else
+ g_pfnrtHalRequestIpiW7Plus = NULL;
+
+ if ( g_pfnrtHalRequestIpiW7Plus
+ && g_pfnrtKeInitializeAffinityEx
+ && g_pfnrtKeAddProcessorAffinityEx
+ && g_pfnrtKeGetProcessorIndexFromNumber)
+ {
+ DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingHalReqestIpiW7Plus\n");
+ g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingHalReqestIpiW7Plus;
+ }
+ else if (pOsVerInfo->uMajorVer >= 6 && g_pfnrtKeIpiGenericCall)
+ {
+ DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingBroadcastIpi\n");
+ g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingBroadcastIpi;
+ }
+ else if (g_pfnrtKeSetTargetProcessorDpc)
+ {
+ DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingDpc\n");
+ g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingDpc;
+ /* Windows XP should always send an IPI -> VERIFY */
+ }
+ else
+ {
+ DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingFailureNotSupported\n");
+ Assert(pOsVerInfo->uMajorVer == 3 && pOsVerInfo->uMinorVer <= 50);
+ g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingFailureNotSupported;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Called by rtR0TermNative.
+ */
+DECLHIDDEN(void) rtR0MpNtTerm(void)
+{
+ /*
+ * Deregister the processor change callback.
+ */
+ PVOID pvMpCpuChangeCallback = g_pvMpCpuChangeCallback;
+ g_pvMpCpuChangeCallback = NULL;
+ if (pvMpCpuChangeCallback)
+ {
+ AssertReturnVoid(g_pfnrtKeDeregisterProcessorChangeCallback);
+ g_pfnrtKeDeregisterProcessorChangeCallback(pvMpCpuChangeCallback);
+ }
+}
+
+
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+ return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+}
+
+
+/**
+ * Implements the NT PROCESSOR_CALLBACK_FUNCTION callback function.
+ *
+ * This maintains the g_rtMpNtCpuSet and invokes MP notification callbacks. When
+ * registered, it's called for each active CPU in the system, avoiding racing
+ * CPU hotplugging (as well as testing the callback).
+ *
+ * @param pvUser User context (not used).
+ * @param pChangeCtx Change context (in).
+ * @param prcOperationStatus Operation status (in/out).
+ *
+ * @remarks ASSUMES no concurrent execution of KeProcessorAddCompleteNotify
+ * notification callbacks. At least during callback registration
+ * callout, we're owning KiDynamicProcessorLock.
+ *
+ * @remarks When registering the handler, we first get KeProcessorAddStartNotify
+ * callbacks for all active CPUs, and after they all succeed we get the
+ * KeProcessorAddCompleteNotify callbacks.
+ */
+static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
+ PNTSTATUS prcOperationStatus)
+{
+ RT_NOREF(pvUser, prcOperationStatus);
+ switch (pChangeCtx->State)
+ {
+ /*
+ * Check whether we can deal with the CPU, failing the start operation if we
+ * can't. The checks we are doing here are to avoid complicated/impossible
+ * cases in KeProcessorAddCompleteNotify. They really just verify the specs.
+ */
+ case KeProcessorAddStartNotify:
+ {
+ NTSTATUS rcNt = STATUS_SUCCESS;
+ if (pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS)
+ {
+ if (pChangeCtx->NtNumber >= g_cRtMpNtMaxCpus)
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: NtNumber=%u is higher than the max CPU count (%u)!\n",
+ pChangeCtx->NtNumber, g_cRtMpNtMaxCpus);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+
+ /* The ProcessNumber field was introduced in Windows 7. */
+ PROCESSOR_NUMBER ProcNum;
+ if (g_pfnrtKeGetProcessorIndexFromNumber)
+ {
+ ProcNum = pChangeCtx->ProcNumber;
+ KEPROCESSORINDEX idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
+ if (idxCpu != pChangeCtx->NtNumber)
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: g_pfnrtKeGetProcessorIndexFromNumber(%u.%u) -> %u, expected %u!\n",
+ ProcNum.Group, ProcNum.Number, idxCpu, pChangeCtx->NtNumber);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+ }
+ else
+ {
+ ProcNum.Group = 0;
+ ProcNum.Number = pChangeCtx->NtNumber;
+ }
+
+ if ( ProcNum.Group < RT_ELEMENTS(g_aRtMpNtCpuGroups)
+ && ProcNum.Number < RT_ELEMENTS(g_aRtMpNtCpuGroups[0].aidxCpuSetMembers))
+ {
+ if (ProcNum.Group >= g_cRtMpNtMaxGroups)
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: %u.%u is out of range - max groups: %u!\n",
+ ProcNum.Group, ProcNum.Number, g_cRtMpNtMaxGroups);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+
+ if (ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus)
+ {
+ Assert(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] != -1);
+ if (g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] == -1)
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: Internal error! %u.%u was assigned -1 as set index!\n",
+ ProcNum.Group, ProcNum.Number);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+
+ Assert(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] != NIL_RTCPUID);
+ if (g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] == NIL_RTCPUID)
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: Internal error! %u (%u.%u) translates to NIL_RTCPUID!\n",
+ pChangeCtx->NtNumber, ProcNum.Group, ProcNum.Number);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+ }
+ else
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: max processors in group %u is %u, cannot add %u to it!\n",
+ ProcNum.Group, g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus, ProcNum.Group, ProcNum.Number);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+ }
+ else
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: %u.%u is out of range (max %u.%u)!\n",
+ ProcNum.Group, ProcNum.Number, RT_ELEMENTS(g_aRtMpNtCpuGroups), RT_ELEMENTS(g_aRtMpNtCpuGroups[0].aidxCpuSetMembers));
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+ }
+ else
+ {
+ DbgPrint("IPRT: KeProcessorAddStartNotify failure: NtNumber=%u is outside RTCPUSET_MAX_CPUS (%u)!\n",
+ pChangeCtx->NtNumber, RTCPUSET_MAX_CPUS);
+ rcNt = STATUS_INTERNAL_ERROR;
+ }
+ if (!NT_SUCCESS(rcNt))
+ *prcOperationStatus = rcNt;
+ break;
+ }
+
+ /*
+ * Update the globals. Since we've checked out range limits and other
+ * limitations already we just AssertBreak here.
+ */
+ case KeProcessorAddCompleteNotify:
+ {
+ /*
+ * Calc the processor number and assert conditions checked in KeProcessorAddStartNotify.
+ */
+ AssertBreak(pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS);
+ AssertBreak(pChangeCtx->NtNumber < g_cRtMpNtMaxCpus);
+ Assert(pChangeCtx->NtNumber == g_cRtMpNtActiveCpus); /* light assumption */
+ PROCESSOR_NUMBER ProcNum;
+ if (g_pfnrtKeGetProcessorIndexFromNumber)
+ {
+ ProcNum = pChangeCtx->ProcNumber;
+ AssertBreak(g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum) == pChangeCtx->NtNumber);
+ AssertBreak(ProcNum.Group < RT_ELEMENTS(g_aRtMpNtCpuGroups));
+ AssertBreak(ProcNum.Group < g_cRtMpNtMaxGroups);
+ }
+ else
+ {
+ ProcNum.Group = 0;
+ ProcNum.Number = pChangeCtx->NtNumber;
+ }
+ AssertBreak(ProcNum.Number < RT_ELEMENTS(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers));
+ AssertBreak(ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus);
+ AssertBreak(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] != -1);
+ AssertBreak(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] != NIL_RTCPUID);
+
+ /*
+ * Add ourselves to the online CPU set and update the active CPU count.
+ */
+ RTCpuSetAddByIndex(&g_rtMpNtCpuSet, pChangeCtx->NtNumber);
+ ASMAtomicIncU32(&g_cRtMpNtActiveCpus);
+
+ /*
+ * Update the group info.
+ *
+ * If the index prediction failed (real hotplugging callbacks only) we
+ * have to switch it around. This is particularly annoying when we
+ * use the index as the ID.
+ */
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ RTCPUID idCpu = RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+ RTCPUID idOld = g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber];
+ if ((idOld & ~RTMPNT_ID_F_INACTIVE) != idCpu)
+ {
+ Assert(idOld & RTMPNT_ID_F_INACTIVE);
+ int idxDest = g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
+ g_aRtMpNtCpuGroups[rtMpCpuIdGetGroup(idOld)].aidxCpuSetMembers[rtMpCpuIdGetGroupMember(idOld)] = idxDest;
+ g_aidRtMpNtByCpuSetIdx[idxDest] = idOld;
+ }
+ g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] = idCpu;
+#else
+ Assert(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] == pChangeCtx->NtNumber);
+ int idxDest = g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
+ if ((ULONG)idxDest != pChangeCtx->NtNumber)
+ {
+ bool fFound = false;
+ uint32_t idxOldGroup = g_cRtMpNtMaxGroups;
+ while (idxOldGroup-- > 0 && !fFound)
+ {
+ uint32_t idxMember = g_aRtMpNtCpuGroups[idxOldGroup].cMaxCpus;
+ while (idxMember-- > 0)
+ if (g_aRtMpNtCpuGroups[idxOldGroup].aidxCpuSetMembers[idxMember] == (int)pChangeCtx->NtNumber)
+ {
+ g_aRtMpNtCpuGroups[idxOldGroup].aidxCpuSetMembers[idxMember] = idxDest;
+ fFound = true;
+ break;
+ }
+ }
+ Assert(fFound);
+ }
+#endif
+ g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] = pChangeCtx->NtNumber;
+
+ /*
+ * Do MP notification callbacks.
+ */
+ rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pChangeCtx->NtNumber);
+ break;
+ }
+
+ case KeProcessorAddFailureNotify:
+ /* ignore */
+ break;
+
+ default:
+ AssertMsgFailed(("State=%u\n", pChangeCtx->State));
+ }
+}
+
+
+/**
+ * Wrapper around KeQueryLogicalProcessorRelationship.
+ *
+ * Queries the group relationship info, retrying with the size the API reports
+ * back (cbInfo is an in/out parameter) while it returns
+ * STATUS_INFO_LENGTH_MISMATCH.
+ *
+ * @returns IPRT status code.
+ * @param ppInfo Where to return the info. Pass to RTMemFree when done.
+ */
+static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo)
+{
+ /* Initial guess: header plus one GROUP_RELATIONSHIP per possible group. */
+ ULONG cbInfo = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
+ + g_cRtMpNtMaxGroups * sizeof(GROUP_RELATIONSHIP);
+ NTSTATUS rcNt;
+ do
+ {
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)RTMemAlloc(cbInfo);
+ if (pInfo)
+ {
+ rcNt = g_pfnrtKeQueryLogicalProcessorRelationship(NULL /*pProcNumber*/, RelationGroup, pInfo, &cbInfo);
+ if (NT_SUCCESS(rcNt))
+ {
+ *ppInfo = pInfo;
+ return VINF_SUCCESS;
+ }
+
+ /* Failed: free and either retry with the updated cbInfo or bail out. */
+ RTMemFree(pInfo);
+ pInfo = NULL;
+ }
+ else
+ rcNt = STATUS_NO_MEMORY;
+ } while (rcNt == STATUS_INFO_LENGTH_MISMATCH);
+ DbgPrint("IPRT: Fatal: KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt);
+ AssertMsgFailed(("KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt));
+ return RTErrConvertFromNtStatus(rcNt);
+}
+
+
+
+
+
+/**
+ * Returns the ID of the CPU the caller is currently executing on.
+ */
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ /* The CPU ID encodes the NT processor group and the group-relative number. */
+ PROCESSOR_NUMBER ProcNum;
+ ProcNum.Group = 0;
+ if (g_pfnrtKeGetCurrentProcessorNumberEx)
+ {
+ ProcNum.Number = 0;
+ g_pfnrtKeGetCurrentProcessorNumberEx(&ProcNum);
+ }
+ else
+ ProcNum.Number = KeGetCurrentProcessorNumber(); /* Number is 8-bit, so we're not subject to BYTE -> WORD upgrade in WDK. */
+ return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+
+#else
+
+ /* Here the CPU ID is simply the NT processor index. */
+ if (g_pfnrtKeGetCurrentProcessorNumberEx)
+ {
+ KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(NULL);
+ Assert(idxCpu < RTCPUSET_MAX_CPUS);
+ return idxCpu;
+ }
+
+ return (uint8_t)KeGetCurrentProcessorNumber(); /* PCR->Number was changed from BYTE to WORD in the WDK, thus the cast. */
+#endif
+}
+
+
+/**
+ * Returns the CPU set index of the CPU the caller is running on.
+ */
+RTDECL(int) RTMpCurSetIndex(void)
+{
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ /* The set index is the NT processor index, distinct from the CPU ID here. */
+ if (g_pfnrtKeGetCurrentProcessorNumberEx)
+ {
+ KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(NULL);
+ Assert(idxCpu < RTCPUSET_MAX_CPUS);
+ return idxCpu;
+ }
+ return (uint8_t)KeGetCurrentProcessorNumber(); /* PCR->Number was changed from BYTE to WORD in the WDK, thus the cast. */
+#else
+ /* CPU ID and set index are the same thing in this configuration. */
+ return (int)RTMpCpuId();
+#endif
+}
+
+
+/**
+ * Returns the current CPU's set index and stores its CPU ID in @a pidCpu.
+ */
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ /* NOTE(review): g_pfnrtKeGetCurrentProcessorNumberEx is called without a
+ NULL check here, unlike RTMpCpuId/RTMpCurSetIndex - presumably this
+ config is only enabled when the API is known present; confirm. */
+ PROCESSOR_NUMBER ProcNum = { 0 , 0, 0 };
+ KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(&ProcNum);
+ Assert(idxCpu < RTCPUSET_MAX_CPUS);
+ *pidCpu = RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+ return idxCpu;
+#else
+ /* 1:1 mapping: index and ID are identical. */
+ return *pidCpu = RTMpCpuId();
+#endif
+}
+
+
+/**
+ * Converts a CPU ID to a CPU set index, returning -1 on failure.
+ */
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ if (idCpu != NIL_RTCPUID)
+ {
+ if (g_pfnrtKeGetProcessorIndexFromNumber)
+ {
+ /* Decode group/number from the ID and let NT map it to an index. */
+ PROCESSOR_NUMBER ProcNum;
+ ProcNum.Group = rtMpCpuIdGetGroup(idCpu);
+ ProcNum.Number = rtMpCpuIdGetGroupMember(idCpu);
+ ProcNum.Reserved = 0;
+ KEPROCESSORINDEX idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
+ if (idxCpu != INVALID_PROCESSOR_INDEX)
+ {
+ Assert(idxCpu < g_cRtMpNtMaxCpus);
+ Assert((ULONG)g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] == idxCpu);
+ return idxCpu;
+ }
+
+ /* Since NT assigned indexes as the CPUs come online, we cannot produce an ID <-> index
+ mapping for not-yet-onlined CPUS that is consistent. We just have to do our best... */
+ if ( ProcNum.Group < g_cRtMpNtMaxGroups
+ && ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus)
+ return g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
+ }
+ else if (rtMpCpuIdGetGroup(idCpu) == 0)
+ return rtMpCpuIdGetGroupMember(idCpu); /* No Ex API: only group 0 exists. */
+ }
+ return -1;
+#else
+ /* 1:1 mapping, just do range checks. */
+ return idCpu < RTCPUSET_MAX_CPUS ? (int)idCpu : -1;
+#endif
+}
+
+
+/**
+ * Converts a CPU set index to a CPU ID, returning NIL_RTCPUID on failure.
+ */
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ if ((unsigned)iCpu < g_cRtMpNtMaxCpus)
+ {
+ /* NOTE(review): the guard tests g_pfnrtKeGetProcessorIndexFromNumber but
+ the call below is g_pfnrtKeGetProcessorNumberFromIndex - presumably the
+ two are always resolved together; confirm against the dynamic import
+ code. Also, the first assert uses <=, which looks like it should be <. */
+ if (g_pfnrtKeGetProcessorIndexFromNumber)
+ {
+ PROCESSOR_NUMBER ProcNum = { 0, 0, 0 };
+ NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(iCpu, &ProcNum);
+ if (NT_SUCCESS(rcNt))
+ {
+ Assert(ProcNum.Group <= g_cRtMpNtMaxGroups);
+ Assert( (g_aidRtMpNtByCpuSetIdx[iCpu] & ~RTMPNT_ID_F_INACTIVE)
+ == RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number));
+ return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+ }
+ }
+ /* Fall back on the cached by-index table (e.g. for not-yet-onlined CPUs). */
+ return g_aidRtMpNtByCpuSetIdx[iCpu];
+ }
+ return NIL_RTCPUID;
+#else
+ /* 1:1 mapping, just do range checks. */
+ return (unsigned)iCpu < RTCPUSET_MAX_CPUS ? iCpu : NIL_RTCPUID;
+#endif
+}
+
+
+/**
+ * Translates an NT (group, member) pair into a CPU set index via the cached
+ * group tables; returns -1 for out-of-range input.
+ */
+RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ if (idxGroup < g_cRtMpNtMaxGroups)
+ if (idxMember < g_aRtMpNtCpuGroups[idxGroup].cMaxCpus)
+ return g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember];
+ return -1;
+}
+
+
+/**
+ * Returns the maximum CPU count of the given processor group and optionally
+ * its active CPU count; both are zero for an invalid group index.
+ */
+RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive)
+{
+ if (idxGroup < g_cRtMpNtMaxGroups)
+ {
+ if (pcActive)
+ *pcActive = g_aRtMpNtCpuGroups[idxGroup].cActiveCpus;
+ return g_aRtMpNtCpuGroups[idxGroup].cMaxCpus;
+ }
+ if (pcActive)
+ *pcActive = 0;
+ return 0;
+}
+
+
+/**
+ * Returns the maximum number of NT processor groups.
+ */
+RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void)
+{
+ return g_cRtMpNtMaxGroups;
+}
+
+
+/**
+ * Returns the highest possible CPU ID in the system.
+ */
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ /* The last member of the last group has the highest encoded ID. */
+ return RTMPCPUID_FROM_GROUP_AND_NUMBER(g_cRtMpNtMaxGroups - 1, g_aRtMpNtCpuGroups[g_cRtMpNtMaxGroups - 1].cMaxCpus - 1);
+#else
+ /* According to MSDN the processor indexes goes from 0 to the maximum
+ number of CPUs in the system. We've check this in initterm-r0drv-nt.cpp. */
+ return g_cRtMpNtMaxCpus - 1;
+#endif
+}
+
+
+/**
+ * Checks whether the given CPU is currently a member of the online set.
+ */
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+ return RTCpuSetIsMember(&g_rtMpNtCpuSet, idCpu);
+}
+
+
+/**
+ * Checks whether the given CPU ID could ever refer to a CPU in this system
+ * (present or hot-pluggable), regardless of its current online state.
+ */
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ /* Valid if the group exists and the member number is below its max. */
+ if (idCpu != NIL_RTCPUID)
+ {
+ unsigned idxGroup = rtMpCpuIdGetGroup(idCpu);
+ if (idxGroup < g_cRtMpNtMaxGroups)
+ return rtMpCpuIdGetGroupMember(idCpu) < g_aRtMpNtCpuGroups[idxGroup].cMaxCpus;
+ }
+ return false;
+
+#else
+ /* A possible CPU ID is one with a value lower than g_cRtMpNtMaxCpus (see
+ comment in RTMpGetMaxCpuId). */
+ return idCpu < g_cRtMpNtMaxCpus;
+#endif
+}
+
+
+
+/**
+ * Fills @a pSet with all possible CPUs (indexes 0 .. g_cRtMpNtMaxCpus-1)
+ * and returns it.
+ */
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ /* The set of possible CPU IDs(/indexes) are from 0 up to
+ g_cRtMpNtMaxCpus (see comment in RTMpGetMaxCpuId). */
+ RTCpuSetEmpty(pSet);
+ int idxCpu = g_cRtMpNtMaxCpus;
+ while (idxCpu-- > 0)
+ RTCpuSetAddByIndex(pSet, idxCpu);
+ return pSet;
+}
+
+
+/**
+ * Returns the maximum (possible) number of CPUs in the system.
+ */
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+ return g_cRtMpNtMaxCpus;
+}
+
+
+/**
+ * Copies the current online CPU set into @a pSet and returns it.
+ */
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ /* Struct copy of the global set maintained by the change callback. */
+ *pSet = g_rtMpNtCpuSet;
+ return pSet;
+}
+
+
+/**
+ * Returns the number of CPUs currently online (population count of the
+ * online set snapshot).
+ */
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+ RTCPUSET Set;
+ RTMpGetOnlineSet(&Set);
+ return RTCpuSetCount(&Set);
+}
+
+
+/**
+ * Returns the online core count; currently just the online logical CPU
+ * count (no SMT sibling folding, see todo).
+ */
+RTDECL(RTCPUID) RTMpGetOnlineCoreCount(void)
+{
+ /** @todo fix me */
+ return RTMpGetOnlineCount();
+}
+
+
+
+/* Disabled experiment (x86 inline asm): peeks at the DPC queue depths in the
+ * undocumented KPRCB at a hard-coded offset. The active variant below is a
+ * stub that always reports no pending work. */
+#if 0
+/* Experiment with checking the undocumented KPRCB structure
+ * 'dt nt!_kprcb 0xaddress' shows the layout
+ */
+typedef struct
+{
+ LIST_ENTRY DpcListHead;
+ ULONG_PTR DpcLock;
+ volatile ULONG DpcQueueDepth;
+ ULONG DpcQueueCount;
+} KDPC_DATA, *PKDPC_DATA;
+
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+ uint8_t *pkprcb;
+ PKDPC_DATA pDpcData;
+
+ _asm {
+ mov eax, fs:0x20
+ mov pkprcb, eax
+ }
+ pDpcData = (PKDPC_DATA)(pkprcb + 0x19e0);
+ if (pDpcData->DpcQueueDepth)
+ return true;
+
+ pDpcData++;
+ if (pDpcData->DpcQueueDepth)
+ return true;
+ return false;
+}
+#else
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+ /** @todo not implemented */
+ return false;
+}
+#endif
+
+
+/**
+ * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
+ * the RTMpOnAll case.
+ *
+ * Runs on every CPU; invokes the worker unconditionally.
+ *
+ * @param uUserCtx The user context argument (PRTMPARGS).
+ */
+static ULONG_PTR rtmpNtOnAllBroadcastIpiWrapper(ULONG_PTR uUserCtx)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
+ /*ASMAtomicIncU32(&pArgs->cHits); - not needed */
+ pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
+ return 0;
+}
+
+
+/**
+ * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
+ * the RTMpOnOthers case.
+ *
+ * Runs on every CPU; invokes the worker on all but the CPU identified by
+ * pArgs->idCpu (the caller's CPU).
+ *
+ * @param uUserCtx The user context argument (PRTMPARGS).
+ */
+static ULONG_PTR rtmpNtOnOthersBroadcastIpiWrapper(ULONG_PTR uUserCtx)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
+ RTCPUID idCpu = RTMpCpuId();
+ if (pArgs->idCpu != idCpu)
+ {
+ /*ASMAtomicIncU32(&pArgs->cHits); - not needed */
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ }
+ return 0;
+}
+
+
+/**
+ * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
+ * the RTMpOnPair case.
+ *
+ * Runs on every CPU; invokes the worker only on the two CPUs identified by
+ * pArgs->idCpu / pArgs->idCpu2, counting each hit so the caller can tell
+ * whether both CPUs were reached.
+ *
+ * @param uUserCtx The user context argument (PRTMPARGS).
+ */
+static ULONG_PTR rtmpNtOnPairBroadcastIpiWrapper(ULONG_PTR uUserCtx)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
+ RTCPUID idCpu = RTMpCpuId();
+ if ( pArgs->idCpu == idCpu
+ || pArgs->idCpu2 == idCpu)
+ {
+ ASMAtomicIncU32(&pArgs->cHits);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ }
+ return 0;
+}
+
+
+/**
+ * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
+ * the RTMpOnSpecific case.
+ *
+ * Runs on every CPU; invokes the worker only on the CPU identified by
+ * pArgs->idCpu and records the hit (caller checks cHits == 1).
+ *
+ * @param uUserCtx The user context argument (PRTMPARGS).
+ */
+static ULONG_PTR rtmpNtOnSpecificBroadcastIpiWrapper(ULONG_PTR uUserCtx)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
+ RTCPUID idCpu = RTMpCpuId();
+ if (pArgs->idCpu == idCpu)
+ {
+ ASMAtomicIncU32(&pArgs->cHits);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ }
+ return 0;
+}
+
+
+/**
+ * Internal worker for the RTMpOn* APIs using KeIpiGenericCall.
+ *
+ * The argument package lives on this stack frame; KeIpiGenericCall runs the
+ * wrapper synchronously on all CPUs, so no reference counting is needed.
+ *
+ * @returns VINF_SUCCESS.
+ * @param pfnWorker The callback.
+ * @param pvUser1 User argument 1.
+ * @param pvUser2 User argument 2.
+ * @param pfnNativeWrapper The wrapper between the NT and IPRT callbacks.
+ * @param idCpu First CPU to match, ultimately specific to the
+ * pfnNativeWrapper used.
+ * @param idCpu2 Second CPU to match, ultimately specific to the
+ * pfnNativeWrapper used.
+ * @param pcHits Where to return the number of hits. Optional.
+ */
+static int rtMpCallUsingBroadcastIpi(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
+ PKIPI_BROADCAST_WORKER pfnNativeWrapper, RTCPUID idCpu, RTCPUID idCpu2,
+ uint32_t *pcHits)
+{
+ RTMPARGS Args;
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu;
+ Args.idCpu2 = idCpu2;
+ Args.cRefs = 0;
+ Args.cHits = 0;
+
+ AssertPtr(g_pfnrtKeIpiGenericCall);
+ g_pfnrtKeIpiGenericCall(pfnNativeWrapper, (uintptr_t)&Args);
+ if (pcHits)
+ *pcHits = Args.cHits;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native nt per-cpu callbacks and PFNRTWORKER
+ *
+ * Invokes the IPRT worker, then drops its reference on the heap-allocated
+ * argument package, freeing it when this was the last reference.
+ *
+ * @param Dpc DPC object
+ * @param DeferredContext Context argument specified by KeInitializeDpc
+ * @param SystemArgument1 Argument specified by KeInsertQueueDpc
+ * @param SystemArgument2 Argument specified by KeInsertQueueDpc
+ */
+static VOID rtmpNtDPCWrapper(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)DeferredContext;
+ RT_NOREF3(Dpc, SystemArgument1, SystemArgument2);
+
+ ASMAtomicIncU32(&pArgs->cHits);
+ pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
+
+ /* Dereference the argument structure. */
+ int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
+ Assert(cRefs >= 0);
+ if (cRefs == 0)
+ RTMemFree(pArgs);
+}
+
+
+/**
+ * Wrapper around KeSetTargetProcessorDpcEx / KeSetTargetProcessorDpc.
+ *
+ * This is shared with the timer code.
+ *
+ * NOTE(review): the Ex path calls g_pfnrtKeGetProcessorNumberFromIndex
+ * without its own NULL check - presumably it is always resolved whenever
+ * KeSetTargetProcessorDpcEx is; confirm against the import code.
+ *
+ * @returns IPRT status code (errors are asserted).
+ * @param pDpc The DPC.
+ * @param idCpu The ID of the new target CPU.
+ */
+DECLHIDDEN(int) rtMpNtSetTargetProcessorDpc(KDPC *pDpc, RTCPUID idCpu)
+{
+ if (g_pfnrtKeSetTargetProcessorDpcEx)
+ {
+ /* Convert to stupid process number (bet KeSetTargetProcessorDpcEx does
+ the reverse conversion internally). */
+ PROCESSOR_NUMBER ProcNum;
+ NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(RTMpCpuIdToSetIndex(idCpu), &ProcNum);
+ AssertMsgReturn(NT_SUCCESS(rcNt),
+ ("KeGetProcessorNumberFromIndex(%u) -> %#x\n", idCpu, rcNt),
+ RTErrConvertFromNtStatus(rcNt));
+
+ rcNt = g_pfnrtKeSetTargetProcessorDpcEx(pDpc, &ProcNum);
+ AssertMsgReturn(NT_SUCCESS(rcNt),
+ ("KeSetTargetProcessorDpcEx(,%u(%u/%u)) -> %#x\n", idCpu, ProcNum.Group, ProcNum.Number, rcNt),
+ RTErrConvertFromNtStatus(rcNt));
+ }
+ else if (g_pfnrtKeSetTargetProcessorDpc)
+ g_pfnrtKeSetTargetProcessorDpc(pDpc, RTMpCpuIdToSetIndex(idCpu));
+ else
+ return VERR_NOT_SUPPORTED;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Internal worker for the RTMpOn* APIs.
+ *
+ * Allocates a reference-counted argument package with one KDPC per target
+ * CPU, queues the DPCs at DISPATCH_LEVEL, then flushes and waits for them.
+ *
+ * @returns IPRT status code.
+ * @param pfnWorker The callback.
+ * @param pvUser1 User argument 1.
+ * @param pvUser2 User argument 2.
+ * @param enmCpuid What to do / is idCpu valid.
+ * @param idCpu Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
+ * RT_NT_CPUID_PAIR, otherwise ignored.
+ * @param idCpu2 Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
+ * @param pcHits Where to return the number of hits. Optional.
+ */
+static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
+ RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
+{
+#if 0
+ /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
+ * driver verifier doesn't complain...
+ */
+ AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
+#endif
+ /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
+ if (!g_pfnrtNtKeFlushQueuedDpcs)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Make a copy of the active CPU set and figure out how many KDPCs we really need.
+ * We must not try setup DPCs for CPUs which aren't there, because that may fail.
+ */
+ RTCPUSET OnlineSet = g_rtMpNtCpuSet;
+ uint32_t cDpcsNeeded;
+ switch (enmCpuid)
+ {
+ case RT_NT_CPUID_SPECIFIC:
+ cDpcsNeeded = 1;
+ break;
+ case RT_NT_CPUID_PAIR:
+ cDpcsNeeded = 2;
+ break;
+ default:
+ /* Re-sample until count and set snapshot agree, in case a CPU
+ comes online between the two reads. */
+ do
+ {
+ cDpcsNeeded = g_cRtMpNtActiveCpus;
+ OnlineSet = g_rtMpNtCpuSet;
+ } while (cDpcsNeeded != g_cRtMpNtActiveCpus);
+ break;
+ }
+
+ /*
+ * Allocate an RTMPARGS structure followed by cDpcsNeeded KDPCs
+ * and initialize them.
+ */
+ PRTMPARGS pArgs = (PRTMPARGS)RTMemAllocZ(sizeof(RTMPARGS) + cDpcsNeeded * sizeof(KDPC));
+ if (!pArgs)
+ return VERR_NO_MEMORY;
+
+ pArgs->pfnWorker = pfnWorker;
+ pArgs->pvUser1 = pvUser1;
+ pArgs->pvUser2 = pvUser2;
+ pArgs->idCpu = NIL_RTCPUID;
+ pArgs->idCpu2 = NIL_RTCPUID;
+ pArgs->cHits = 0;
+ pArgs->cRefs = 1; /* Caller's reference; DPCs take their own below. */
+
+ int rc;
+ KDPC *paExecCpuDpcs = (KDPC *)(pArgs + 1);
+ if (enmCpuid == RT_NT_CPUID_SPECIFIC)
+ {
+ KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
+ rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[0], idCpu);
+ pArgs->idCpu = idCpu;
+ }
+ else if (enmCpuid == RT_NT_CPUID_PAIR)
+ {
+ KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
+ rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[0], idCpu);
+ pArgs->idCpu = idCpu;
+
+ KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
+ if (RT_SUCCESS(rc))
+ rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
+ pArgs->idCpu2 = idCpu2;
+ }
+ else
+ {
+ /* ALL / OTHERS: one DPC per online CPU in the snapshot. */
+ rc = VINF_SUCCESS;
+ for (uint32_t i = 0; i < cDpcsNeeded && RT_SUCCESS(rc); i++)
+ if (RTCpuSetIsMemberByIndex(&OnlineSet, i))
+ {
+ KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
+ rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[i], RTMpCpuIdFromSetIndex(i));
+ }
+ }
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(pArgs);
+ return rc;
+ }
+
+ /*
+ * Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
+ * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
+ */
+ KIRQL oldIrql;
+ KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
+
+ /*
+ * We cannot do other than assume a 1:1 relationship between the
+ * affinity mask and the process despite the warnings in the docs.
+ * If someone knows a better way to get this done, please let bird know.
+ */
+ ASMCompilerBarrier(); /* paranoia */
+ if (enmCpuid == RT_NT_CPUID_SPECIFIC)
+ {
+ ASMAtomicIncS32(&pArgs->cRefs);
+ BOOLEAN fRc = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
+ Assert(fRc); NOREF(fRc);
+ }
+ else if (enmCpuid == RT_NT_CPUID_PAIR)
+ {
+ ASMAtomicIncS32(&pArgs->cRefs);
+ BOOLEAN fRc = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
+ Assert(fRc); NOREF(fRc);
+
+ ASMAtomicIncS32(&pArgs->cRefs);
+ fRc = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
+ Assert(fRc); NOREF(fRc);
+ }
+ else
+ {
+ /* Queue DPCs on all other CPUs, then run the worker locally for the
+ ALL case. NOTE(review): iSelf (a set index) is passed as the
+ RTCPUID argument here - identical only in the non-group config;
+ confirm this is intended when IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+ is defined. */
+ uint32_t iSelf = RTMpCurSetIndex();
+ for (uint32_t i = 0; i < cDpcsNeeded; i++)
+ {
+ if ( (i != iSelf)
+ && RTCpuSetIsMemberByIndex(&OnlineSet, i))
+ {
+ ASMAtomicIncS32(&pArgs->cRefs);
+ BOOLEAN fRc = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
+ Assert(fRc); NOREF(fRc);
+ }
+ }
+ if (enmCpuid != RT_NT_CPUID_OTHERS)
+ pfnWorker(iSelf, pvUser1, pvUser2);
+ }
+
+ KeLowerIrql(oldIrql);
+
+ /*
+ * Flush all DPCs and wait for completion. (can take long!)
+ */
+ /** @todo Consider changing this to an active wait using some atomic inc/dec
+ * stuff (and check for the current cpu above in the specific case). */
+ /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
+ * executed. Seen pArgs being freed while some CPU was using it before
+ * cRefs was added. */
+ if (g_pfnrtNtKeFlushQueuedDpcs)
+ g_pfnrtNtKeFlushQueuedDpcs();
+
+ if (pcHits)
+ *pcHits = pArgs->cHits;
+
+ /* Dereference the argument structure. */
+ int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
+ Assert(cRefs >= 0);
+ if (cRefs == 0)
+ RTMemFree(pArgs);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Executes @a pfnWorker on every online CPU, preferring the broadcast IPI
+ * path when KeIpiGenericCall is available, falling back on DPCs otherwise.
+ */
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ if (g_pfnrtKeIpiGenericCall)
+ return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnAllBroadcastIpiWrapper,
+ NIL_RTCPUID, NIL_RTCPUID, NULL);
+ return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_ALL, NIL_RTCPUID, NIL_RTCPUID, NULL);
+}
+
+
+/**
+ * Executes @a pfnWorker on every online CPU except the caller's, using
+ * broadcast IPI when available and DPCs otherwise.
+ */
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ if (g_pfnrtKeIpiGenericCall)
+ return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnOthersBroadcastIpiWrapper,
+ NIL_RTCPUID, NIL_RTCPUID, NULL);
+ return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_OTHERS, NIL_RTCPUID, NIL_RTCPUID, NULL);
+}
+
+
+/**
+ * Executes @a pfnWorker on the two given (distinct) CPUs, translating the
+ * hit count into success / offline / internal-error codes.
+ */
+RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ int rc;
+ AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
+ AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
+ /* Concurrent execution requires the broadcast IPI mechanism. */
+ if ((fFlags & RTMPON_F_CONCURRENT_EXEC) && !g_pfnrtKeIpiGenericCall)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Check that both CPUs are online before doing the broadcast call.
+ */
+ if ( RTMpIsCpuOnline(idCpu1)
+ && RTMpIsCpuOnline(idCpu2))
+ {
+ /*
+ * The broadcast IPI isn't quite as bad as it could have been, because
+ * it looks like windows doesn't synchronize CPUs on the way out, they
+ * seems to get back to normal work while the pair is still busy.
+ */
+ uint32_t cHits = 0;
+ if (g_pfnrtKeIpiGenericCall)
+ rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnPairBroadcastIpiWrapper, idCpu1, idCpu2, &cHits);
+ else
+ rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_PAIR, idCpu1, idCpu2, &cHits);
+ if (RT_SUCCESS(rc))
+ {
+ /* Both CPUs must have run the worker for full success. */
+ Assert(cHits <= 2);
+ if (cHits == 2)
+ rc = VINF_SUCCESS;
+ else if (cHits == 1)
+ rc = VERR_NOT_ALL_CPUS_SHOWED;
+ else if (cHits == 0)
+ rc = VERR_CPU_OFFLINE;
+ else
+ rc = VERR_CPU_IPE_1;
+ }
+ }
+ /*
+ * A CPU must be present to be considered just offline.
+ */
+ else if ( RTMpIsCpuPresent(idCpu1)
+ && RTMpIsCpuPresent(idCpu2))
+ rc = VERR_CPU_OFFLINE;
+ else
+ rc = VERR_CPU_NOT_FOUND;
+ return rc;
+}
+
+
+/**
+ * Reports whether RTMpOnPair supports RTMPON_F_CONCURRENT_EXEC, which
+ * requires the KeIpiGenericCall API.
+ */
+RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
+{
+ return g_pfnrtKeIpiGenericCall != NULL;
+}
+
+
+/**
+ * Releases a reference to a RTMPNTONSPECIFICARGS heap allocation, freeing it
+ * when the last reference is released.
+ *
+ * There are at most two references (caller + DPC), so the post-decrement
+ * count must be 0 or 1.
+ */
+DECLINLINE(void) rtMpNtOnSpecificRelease(PRTMPNTONSPECIFICARGS pArgs)
+{
+ uint32_t cRefs = ASMAtomicDecU32(&pArgs->cRefs);
+ AssertMsg(cRefs <= 1, ("cRefs=%#x\n", cRefs));
+ if (cRefs == 0)
+ RTMemFree(pArgs);
+}
+
+
+/**
+ * Wrapper between the native nt per-cpu callbacks and PFNRTWORKER
+ *
+ * Marks the package executing, runs the worker, then signals completion via
+ * both the fDone flag (polled by the caller) and the DoneEvt event, and
+ * finally drops the DPC's reference.
+ *
+ * @param Dpc DPC object
+ * @param DeferredContext Context argument specified by KeInitializeDpc
+ * @param SystemArgument1 Argument specified by KeInsertQueueDpc
+ * @param SystemArgument2 Argument specified by KeInsertQueueDpc
+ */
+static VOID rtMpNtOnSpecificDpcWrapper(IN PKDPC Dpc, IN PVOID DeferredContext,
+ IN PVOID SystemArgument1, IN PVOID SystemArgument2)
+{
+ PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)DeferredContext;
+ RT_NOREF3(Dpc, SystemArgument1, SystemArgument2);
+
+ ASMAtomicWriteBool(&pArgs->fExecuting, true);
+
+ pArgs->CallbackArgs.pfnWorker(RTMpCpuId(), pArgs->CallbackArgs.pvUser1, pArgs->CallbackArgs.pvUser2);
+
+ ASMAtomicWriteBool(&pArgs->fDone, true);
+ KeSetEvent(&pArgs->DoneEvt, 1 /*PriorityIncrement*/, FALSE /*Wait*/);
+
+ rtMpNtOnSpecificRelease(pArgs);
+}
+
+
+/**
+ * Executes @a pfnWorker on the CPU identified by @a idCpu.
+ *
+ * Strategy: broadcast IPI for small systems or high IRQL; otherwise a
+ * targeted DPC with a refcounted argument package, an active spin wait,
+ * then event waits with optional CPU poking.
+ */
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ /*
+ * Don't try mess with an offline CPU.
+ */
+ if (!RTMpIsCpuOnline(idCpu))
+ return !RTMpIsCpuPossible(idCpu)
+ ? VERR_CPU_NOT_FOUND
+ : VERR_CPU_OFFLINE;
+
+ /*
+ * Use the broadcast IPI routine if there are no more than two CPUs online,
+ * or if the current IRQL is unsuitable for KeWaitForSingleObject.
+ */
+ int rc;
+ uint32_t cHits = 0;
+ if ( g_pfnrtKeIpiGenericCall
+ && ( RTMpGetOnlineCount() <= 2
+ || KeGetCurrentIrql() > APC_LEVEL)
+ )
+ {
+ rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
+ idCpu, NIL_RTCPUID, &cHits);
+ if (RT_SUCCESS(rc))
+ {
+ /* Exactly one hit means the target CPU ran the worker. */
+ if (cHits == 1)
+ return VINF_SUCCESS;
+ rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
+ }
+ return rc;
+ }
+
+#if 0
+ rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
+ if (RT_SUCCESS(rc))
+ {
+ if (cHits == 1)
+ return VINF_SUCCESS;
+ rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
+ }
+ return rc;
+
+#else
+ /*
+ * Initialize the argument package and the objects within it.
+ * The package is referenced counted to avoid unnecessary spinning to
+ * synchronize cleanup and prevent stack corruption.
+ */
+ PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)RTMemAllocZ(sizeof(*pArgs));
+ if (!pArgs)
+ return VERR_NO_MEMORY;
+ pArgs->cRefs = 2; /* One for us, one for the DPC. */
+ pArgs->fExecuting = false;
+ pArgs->fDone = false;
+ pArgs->CallbackArgs.pfnWorker = pfnWorker;
+ pArgs->CallbackArgs.pvUser1 = pvUser1;
+ pArgs->CallbackArgs.pvUser2 = pvUser2;
+ pArgs->CallbackArgs.idCpu = idCpu;
+ pArgs->CallbackArgs.cHits = 0;
+ pArgs->CallbackArgs.cRefs = 2;
+ KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
+ KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&pArgs->Dpc, HighImportance);
+ rc = rtMpNtSetTargetProcessorDpc(&pArgs->Dpc, idCpu);
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(pArgs);
+ return rc;
+ }
+
+ /*
+ * Disable preemption while we check the current processor and inserts the DPC.
+ */
+ KIRQL bOldIrql;
+ KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
+ ASMCompilerBarrier(); /* paranoia */
+
+ if (RTMpCpuId() == idCpu)
+ {
+ /* Just execute the callback on the current CPU. */
+ pfnWorker(idCpu, pvUser1, pvUser2);
+ KeLowerIrql(bOldIrql);
+
+ RTMemFree(pArgs); /* DPC never queued, so both references are ours. */
+ return VINF_SUCCESS;
+ }
+
+ /* Different CPU, so queue it if the CPU is still online. */
+ if (RTMpIsCpuOnline(idCpu))
+ {
+ BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
+ Assert(fRc); NOREF(fRc);
+ KeLowerIrql(bOldIrql);
+
+ uint64_t const nsRealWaitTS = RTTimeNanoTS();
+
+ /*
+ * Wait actively for a while in case the CPU/thread responds quickly.
+ */
+ uint32_t cLoopsLeft = 0x20000;
+ while (cLoopsLeft-- > 0)
+ {
+ if (pArgs->fDone)
+ {
+ rtMpNtOnSpecificRelease(pArgs);
+ return VINF_SUCCESS;
+ }
+ ASMNopPause();
+ }
+
+ /*
+ * It didn't respond, so wait on the event object, poking the CPU if it's slow.
+ */
+ LARGE_INTEGER Timeout;
+ Timeout.QuadPart = -10000; /* 1ms */
+ NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
+ if (rcNt == STATUS_SUCCESS)
+ {
+ rtMpNtOnSpecificRelease(pArgs);
+ return VINF_SUCCESS;
+ }
+
+ /* If it hasn't respondend yet, maybe poke it and wait some more. */
+ if (rcNt == STATUS_TIMEOUT)
+ {
+ /* Only poke when using a HAL IPI worker; the DPC-based poke could
+ interfere with the DPC we queued above. */
+ if ( !pArgs->fExecuting
+ && ( g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
+ || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
+ RTMpPokeCpu(idCpu);
+
+ Timeout.QuadPart = -1280000; /* 128ms */
+ rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
+ if (rcNt == STATUS_SUCCESS)
+ {
+ rtMpNtOnSpecificRelease(pArgs);
+ return VINF_SUCCESS;
+ }
+ }
+
+ /*
+ * Something weird is happening, try bail out.
+ */
+ if (KeRemoveQueueDpc(&pArgs->Dpc))
+ {
+ RTMemFree(pArgs); /* DPC was still queued, so we can return without further ado. */
+ LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
+ }
+ else
+ {
+ /* DPC is running, wait a good while for it to complete. */
+ LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
+
+ Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
+ rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
+ if (rcNt != STATUS_SUCCESS)
+ LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
+ }
+ rc = RTErrConvertFromNtStatus(rcNt);
+ }
+ else
+ {
+ /* CPU is offline.*/
+ KeLowerIrql(bOldIrql);
+ rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
+ }
+
+ rtMpNtOnSpecificRelease(pArgs);
+ return rc;
+#endif
+}
+
+
+
+
+/**
+ * No-op DPC routine used by rtMpPokeCpuUsingDpc; queuing it is enough to
+ * interrupt the target CPU.
+ */
+static VOID rtMpNtPokeCpuDummy(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
+{
+ NOREF(Dpc);
+ NOREF(DeferredContext);
+ NOREF(SystemArgument1);
+ NOREF(SystemArgument2);
+}
+
+
+/** Callback used by rtMpPokeCpuUsingBroadcastIpi. Does nothing; the IPI
+ * delivery itself is the poke. */
+static ULONG_PTR rtMpIpiGenericCall(ULONG_PTR Argument)
+{
+ NOREF(Argument);
+ return 0;
+}
+
+
+/**
+ * RTMpPokeCpu worker that uses broadcast IPIs for doing the work.
+ *
+ * Note that this interrupts ALL CPUs, not just @a idCpu, which is why the
+ * parameter is unused.
+ *
+ * @returns VINF_SUCCESS
+ * @param idCpu The CPU identifier.
+ */
+int rtMpPokeCpuUsingBroadcastIpi(RTCPUID idCpu)
+{
+ NOREF(idCpu);
+ g_pfnrtKeIpiGenericCall(rtMpIpiGenericCall, 0);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * RTMpPokeCpu worker that uses the Windows 7 and later version of
+ * HalRequestIpip to get the job done.
+ *
+ * Builds a KAFFINITY_EX containing just the target processor and requests
+ * an IPI for it.
+ *
+ * @returns VINF_SUCCESS
+ * @param idCpu The CPU identifier.
+ */
+int rtMpPokeCpuUsingHalReqestIpiW7Plus(RTCPUID idCpu)
+{
+ /* idCpu is an HAL processor index, so we can use it directly. */
+ KAFFINITY_EX Target;
+ g_pfnrtKeInitializeAffinityEx(&Target);
+ g_pfnrtKeAddProcessorAffinityEx(&Target, idCpu);
+
+ g_pfnrtHalRequestIpiW7Plus(0, &Target);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * RTMpPokeCpu worker that uses the Vista and earlier version of HalRequestIpip
+ * to get the job done.
+ *
+ * Uses a plain KAFFINITY bitmask, so only processor group 0 can be targeted.
+ *
+ * @returns VINF_SUCCESS
+ * @param idCpu The CPU identifier.
+ */
+int rtMpPokeCpuUsingHalReqestIpiPreW7(RTCPUID idCpu)
+{
+ __debugbreak(); /** @todo this code needs testing!! */
+ KAFFINITY Target = 1;
+ Target <<= idCpu;
+ g_pfnrtHalRequestIpiPreW7(Target);
+ return VINF_SUCCESS;
+}
+
+/** RTMpPokeCpu worker used when no poking mechanism is available. */
+int rtMpPokeCpuUsingFailureNotSupported(RTCPUID idCpu)
+{
+ NOREF(idCpu);
+ return VERR_NOT_SUPPORTED;
+}
+
+/**
+ * RTMpPokeCpu worker that queues a no-op high-importance DPC on the target
+ * CPU so the IPI/DPC delivery interrupts it.
+ *
+ * NOTE(review): the one-time init loop targets every DPC at idCpu rather
+ * than loop index i, and s_fPokeDPCsInitialized is set without any locking;
+ * the per-call retargeting below presumably papers over both - confirm
+ * against upstream before relying on the initial targets.
+ */
+int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
+{
+ Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+ /*
+ * APC fallback.
+ */
+ static KDPC s_aPokeDpcs[RTCPUSET_MAX_CPUS] = {0};
+ static bool s_fPokeDPCsInitialized = false;
+
+ if (!s_fPokeDPCsInitialized)
+ {
+ for (unsigned i = 0; i < g_cRtMpNtMaxCpus; i++)
+ {
+ KeInitializeDpc(&s_aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&s_aPokeDpcs[i], HighImportance);
+ int rc = rtMpNtSetTargetProcessorDpc(&s_aPokeDpcs[i], idCpu);
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+
+ s_fPokeDPCsInitialized = true;
+ }
+
+ /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
+ KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL. */
+ KIRQL oldIrql;
+ KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
+
+ /* Re-target the per-CPU DPC at the requested CPU before queueing it. */
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
+ g_pfnrtKeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);
+
+ /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
+ Note! Not true on at least Vista & Windows 7 */
+ BOOLEAN fRet = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);
+
+ KeLowerIrql(oldIrql);
+ return fRet == TRUE ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
+}
+
+
+/**
+ * Interrupts the given CPU (e.g. to make it re-evaluate scheduling state),
+ * dispatching to the poke worker selected at init time.
+ */
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+ if (!RTMpIsCpuOnline(idCpu))
+ return !RTMpIsCpuPossible(idCpu)
+ ? VERR_CPU_NOT_FOUND
+ : VERR_CPU_OFFLINE;
+ /* Calls rtMpPokeCpuUsingDpc, rtMpPokeCpuUsingHalReqestIpiW7Plus or rtMpPokeCpuUsingBroadcastIpi. */
+ return g_pfnrtMpPokeCpuWorker(idCpu);
+}
+
+
+/**
+ * Reports whether RTMpOnAll may safely be invoked concurrently from
+ * multiple threads on this platform; not supported here.
+ */
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+ return false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/nt3fakes-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/nt3fakes-r0drv-nt.cpp
new file mode 100644
index 00000000..375069e5
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/nt3fakes-r0drv-nt.cpp
@@ -0,0 +1,813 @@
+/* $Id: nt3fakes-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - NT 3.x fakes for NT 4.0 KPIs.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define _IMAGE_NT_HEADERS RT_CONCAT(_IMAGE_NT_HEADERS,ARCH_BITS)
+#include "the-nt-kernel.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/ctype.h>
+#include <iprt/dbg.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/string.h>
+#include <iprt/utf16.h>
+#include <iprt/x86.h>
+#include <iprt/formats/mz.h>
+#include <iprt/formats/pecoff.h>
+#include "internal-r0drv-nt.h"
+
+typedef uint32_t DWORD;
+#include <VerRsrc.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLASM(void) rtNt3InitSymbolsAssembly(void); /* in nt3fakesA-r0drv-nt.asm */
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static uint32_t g_uNt3MajorVer = 3;
+static uint32_t g_uNt3MinorVer = 51;
+static uint32_t g_uNt3BuildNo = 1057;
+static bool g_fNt3Checked = false;
+static bool g_fNt3Smp = false; /**< Not reliable. */
+static bool volatile g_fNt3VersionInitialized = false;
+
+static uint8_t *g_pbNt3OsKrnl = (uint8_t *)UINT32_C(0x80100000);
+static uint32_t g_cbNt3OsKrnl = 0x300000;
+static uint8_t *g_pbNt3Hal = (uint8_t *)UINT32_C(0x80400000);
+static uint32_t g_cbNt3Hal = _512K;
+static bool volatile g_fNt3ModuleInfoInitialized = false;
+
+
+RT_C_DECLS_BEGIN
+/** @name KPIs we provide fallback implementations for.
+ *
+ * The assembly init routine will point the __imp_xxx variable to the NT
+ * implementation if available, using the fallback if not.
+ * @{ */
+decltype(PsGetVersion) *g_pfnrtPsGetVersion;
+decltype(ZwQuerySystemInformation) *g_pfnrtZwQuerySystemInformation;
+decltype(KeSetTimerEx) *g_pfnrtKeSetTimerEx;
+decltype(IoAttachDeviceToDeviceStack) *g_pfnrtIoAttachDeviceToDeviceStack;
+decltype(PsGetCurrentProcessId) *g_pfnrtPsGetCurrentProcessId;
+decltype(ZwYieldExecution) *g_pfnrtZwYieldExecution;
+decltype(ExAcquireFastMutex) *g_pfnrtExAcquireFastMutex;
+decltype(ExReleaseFastMutex) *g_pfnrtExReleaseFastMutex;
+/** @} */
+
+/** @name Fastcall optimizations not present in NT 3.1.
+ *
+ * We try resolve both the stdcall and fastcall variants and patch it up in
+ * assembly. The last four routines are in the hal.
+ *
+ * @{ */
+decltype(IofCompleteRequest) *g_pfnrtIofCompleteRequest;
+decltype(ObfDereferenceObject) *g_pfnrtObfDereferenceObject;
+decltype(IofCallDriver) *g_pfnrtIofCallDriver;
+decltype(KfAcquireSpinLock) *g_pfnrtKfAcquireSpinLock;
+decltype(KfReleaseSpinLock) *g_pfnrtKfReleaseSpinLock;
+decltype(KefAcquireSpinLockAtDpcLevel) *g_pfnrtKefAcquireSpinLockAtDpcLevel;
+decltype(KefReleaseSpinLockFromDpcLevel) *g_pfnrtKefReleaseSpinLockFromDpcLevel;
+decltype(KfLowerIrql) *g_pfnrtKfLowerIrql;
+decltype(KfRaiseIrql) *g_pfnrtKfRaiseIrql;
+
+VOID (__stdcall *g_pfnrtIoCompleteRequest)(PIRP, CCHAR);
+LONG_PTR (__stdcall *g_pfnrtObDereferenceObject)(PVOID);
+NTSTATUS (__stdcall *g_pfnrtIoCallDriver)(PDEVICE_OBJECT, PIRP);
+KIRQL (__stdcall *g_pfnrtKeAcquireSpinLock)(PKSPIN_LOCK);
+VOID (__stdcall *g_pfnrtKeReleaseSpinLock)(PKSPIN_LOCK, KIRQL);
+KIRQL (__stdcall *g_pfnrtKeAcquireSpinLockAtDpcLevel)(PKSPIN_LOCK);
+VOID (__stdcall *g_pfnrtKeReleaseSpinLockFromDpcLevel)(PKSPIN_LOCK);
+VOID (__stdcall *g_pfnrtKeLowerIrql)(KIRQL);
+KIRQL (__stdcall *g_pfnrtKeRaiseIrql)(KIRQL);
+/** @} */
+
+/** @name DATA exports and associated stuff
+ * @{ */
+/** Import address table entry for KeTickCount (defined in asm). */
+extern KSYSTEM_TIME *_imp__KeTickCount;
+/** @} */
+
+RT_C_DECLS_END
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void rtR0Nt3InitModuleInfo(void);
+
+
+/**
+ * Parses a run of decimal digits at the front of a UTF-16 string.
+ *
+ * Parsing stops at the first non-digit character or when the string is
+ * exhausted; the caller's pointer and length variables are advanced past
+ * the digits that were consumed.
+ *
+ * @returns The decoded (unsigned) value; 0 when no digits were present.
+ * @param   ppwcValue   Pointer to the string pointer variable.  Updated.
+ * @param   pcwcValue   Pointer to the string length variable.  Updated.
+ */
+static uint32_t rtR0Nt3StringToNum(PCRTUTF16 *ppwcValue, size_t *pcwcValue)
+{
+    PCRTUTF16 pwcCur  = *ppwcValue;
+    size_t    cwcLeft = *pcwcValue;
+    uint32_t  uResult = 0;
+
+    for (; cwcLeft > 0; pwcCur++, cwcLeft--)
+    {
+        unsigned const uDigit = (unsigned)*pwcCur - (unsigned)'0';
+        if (uDigit >= (unsigned)10)
+            break;
+        uResult = uResult * 10 + uDigit;
+    }
+
+    *ppwcValue = pwcCur;
+    *pcwcValue = cwcLeft;
+    return uResult;
+}
+
+
+/**
+ * Implements RTL_QUERY_REGISTRY_ROUTINE for processing
+ * 'HKLM/Software/Microsoft/Windows NT/CurrentVersion/CurrentVersion'.
+ *
+ * Parses a "major.minor" style string into g_uNt3MajorVer / g_uNt3MinorVer
+ * and sets bit 0 of the *pvUser found-mask on success.
+ *
+ * @returns STATUS_SUCCESS (always, so the remaining values get queried too).
+ * @param   pwszValueName   The value name (ignored).
+ * @param   uValueType      The value type; REG_SZ / REG_EXPAND_SZ expected.
+ * @param   pvValue         The value data (UTF-16 string).
+ * @param   cbValue         The value data size in bytes.
+ * @param   pvUser          Points to a uint32_t found-mask; bit 0 set here.
+ * @param   pvEntryCtx      The entry context (ignored).
+ */
+static NTSTATUS NTAPI rtR0Nt3VerEnumCallback_CurrentVersion(PWSTR pwszValueName, ULONG uValueType,
+                                                            PVOID pvValue, ULONG cbValue, PVOID pvUser, PVOID pvEntryCtx)
+{
+    RT_NOREF(pwszValueName, pvEntryCtx);
+    if (   uValueType == REG_SZ
+        || uValueType == REG_EXPAND_SZ)
+    {
+        PCRTUTF16 pwcValue = (PCRTUTF16)pvValue;
+        size_t    cwcValue = cbValue / sizeof(*pwcValue);
+        uint32_t  uMajor   = rtR0Nt3StringToNum(&pwcValue, &cwcValue);
+        uint32_t  uMinor   = 0;
+        if (cwcValue > 1)
+        {
+            /* Skip the separator (presumably '.'; not actually validated)
+               and parse the minor version. */
+            pwcValue++;
+            cwcValue--;
+            uMinor = rtR0Nt3StringToNum(&pwcValue, &cwcValue);
+        }
+
+        if (uMajor >= 3)
+        {
+            g_uNt3MajorVer = uMajor;
+            g_uNt3MinorVer = uMinor;
+            RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentVersion found: uMajor=%u uMinor=%u\n", uMajor, uMinor);
+            *(uint32_t *)pvUser |= RT_BIT_32(0);
+            return STATUS_SUCCESS;
+        }
+
+        RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentVersion: '%.*ls'\n", cbValue / sizeof(RTUTF16), pvValue);
+    }
+    else
+        RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentVersion: uValueType=%u %.*Rhxs\n", uValueType, cbValue, pvValue);
+    return STATUS_SUCCESS;
+}
+
+
+/**
+ * Implements RTL_QUERY_REGISTRY_ROUTINE for processing
+ * 'HKLM/Software/Microsoft/Windows NT/CurrentVersion/CurrentBuildNumber'.
+ *
+ * Parses the decimal build number into g_uNt3BuildNo and sets bit 1 of the
+ * *pvUser found-mask on success.
+ *
+ * @returns STATUS_SUCCESS (always, so the remaining values get queried too).
+ * @param   pwszValueName   The value name (ignored).
+ * @param   uValueType      The value type; REG_SZ / REG_EXPAND_SZ expected.
+ * @param   pvValue         The value data (UTF-16 string).
+ * @param   cbValue         The value data size in bytes.
+ * @param   pvUser          Points to a uint32_t found-mask; bit 1 set here.
+ * @param   pvEntryCtx      The entry context (ignored).
+ */
+static NTSTATUS NTAPI rtR0Nt3VerEnumCallback_CurrentBuildNumber(PWSTR pwszValueName, ULONG uValueType,
+                                                                PVOID pvValue, ULONG cbValue, PVOID pvUser, PVOID pvEntryCtx)
+{
+    RT_NOREF(pwszValueName, pvEntryCtx);
+    if (   uValueType == REG_SZ
+        || uValueType == REG_EXPAND_SZ)
+    {
+        PCRTUTF16 pwcValue = (PCRTUTF16)pvValue;
+        size_t    cwcValue = cbValue / sizeof(*pwcValue);
+        uint32_t  uBuildNo = rtR0Nt3StringToNum(&pwcValue, &cwcValue);
+
+        /* Only accept plausible build numbers (the global defaults to 1057). */
+        if (uBuildNo >= 100 && uBuildNo < _1M)
+        {
+            g_uNt3BuildNo = uBuildNo;
+            RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentBuildNumber found: uBuildNo=%u\n", uBuildNo);
+            *(uint32_t *)pvUser |= RT_BIT_32(1);
+            return STATUS_SUCCESS;
+        }
+
+        RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentBuildNumber: '%.*ls'\n", cbValue / sizeof(RTUTF16), pvValue);
+    }
+    else
+        RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentBuildNumber: uValueType=%u %.*Rhxs\n", uValueType, cbValue, pvValue);
+    return STATUS_SUCCESS;
+}
+
+
+/**
+ * Implements RTL_QUERY_REGISTRY_ROUTINE for processing
+ * 'HKLM/Software/Microsoft/Windows NT/CurrentVersion/CurrentType'.
+ *
+ * Expects a string of the form "Uniprocessor Free" / "Multiprocessor Checked"
+ * and updates g_fNt3Smp / g_fNt3Checked accordingly, setting bit 2 of the
+ * *pvUser found-mask on success.
+ *
+ * @returns STATUS_SUCCESS (always, so the remaining values get queried too).
+ * @param   pwszValueName   The value name (ignored).
+ * @param   uValueType      The value type; REG_SZ / REG_EXPAND_SZ expected.
+ * @param   pvValue         The value data (UTF-16 string).
+ * @param   cbValue         The value data size in bytes.
+ * @param   pvUser          Points to a uint32_t found-mask; bit 2 set here.
+ * @param   pvEntryCtx      The entry context (ignored).
+ */
+static NTSTATUS NTAPI rtR0Nt3VerEnumCallback_CurrentType(PWSTR pwszValueName, ULONG uValueType,
+                                                         PVOID pvValue, ULONG cbValue, PVOID pvUser, PVOID pvEntryCtx)
+{
+    RT_NOREF(pwszValueName, pvEntryCtx);
+    if (   uValueType == REG_SZ
+        || uValueType == REG_EXPAND_SZ)
+    {
+        PCRTUTF16 pwcValue = (PCRTUTF16)pvValue;
+        size_t    cwcValue = cbValue / sizeof(*pwcValue);
+
+        /* First word: Uniprocessor vs Multiprocessor (case-insensitive). */
+        int fSmp = -1;
+        if (cwcValue >= 12 && RTUtf16NICmpAscii(pwcValue, "Uniprocessor", 12) == 0)
+        {
+            cwcValue -= 12;
+            pwcValue += 12;
+            fSmp = 0;
+        }
+        else if (cwcValue >= 14 && RTUtf16NICmpAscii(pwcValue, "Multiprocessor", 14) == 0)
+        {
+            cwcValue -= 14;
+            pwcValue += 14;
+            fSmp = 1;
+        }
+        if (fSmp != -1)
+        {
+            /* Skip whitespace, then the second word: Free vs Checked build. */
+            while (cwcValue > 0 && RT_C_IS_SPACE(*pwcValue))
+                cwcValue--, pwcValue++;
+
+            int fChecked = -1;
+            if (cwcValue >= 4 && RTUtf16NICmpAscii(pwcValue, "Free", 4) == 0)
+                fChecked = 0;
+            else if (cwcValue >= 7 && RTUtf16NICmpAscii(pwcValue, "Checked", 7) == 0)
+                fChecked = 1;
+            if (fChecked != -1)
+            {
+                g_fNt3Smp = fSmp != 0;
+                g_fNt3Checked = fChecked != 0;
+                RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentType found: fSmp=%d fChecked=%d\n", fSmp, fChecked);
+                *(uint32_t *)pvUser |= RT_BIT_32(2);
+                return STATUS_SUCCESS;
+            }
+        }
+
+        RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentType: '%.*ls'\n", cbValue / sizeof(RTUTF16), pvValue);
+    }
+    else
+        RTLogBackdoorPrintf("rtR0Nt3VerEnumCallback_CurrentType: uValueType=%u %.*Rhxs\n", uValueType, cbValue, pvValue);
+    return STATUS_SUCCESS;
+}
+
+
+/**
+ * Figures out the NT 3.x version from the registry.
+ *
+ * Falls back on the NTOSKRNL PE header and its version resource when the
+ * registry cannot provide the version (it is not necessarily initialized
+ * when we're loaded), and finally lets PsGetVersion have the last word when
+ * that KPI is available.
+ *
+ * Updates g_uNt3MajorVer, g_uNt3MinorVer, g_uNt3BuildNo, g_fNt3Smp and
+ * g_fNt3Checked, then sets g_fNt3VersionInitialized.
+ *
+ * @note This may be called before rtR0Nt3InitSymbols.
+ */
+static void rtR0Nt3InitVersion(void)
+{
+    /*
+     * No PsGetVersion, so try the registry. Unfortunately not necessarily
+     * initialized when we're loaded.
+     */
+    RTL_QUERY_REGISTRY_TABLE aQuery[4];     /* Entry 3 stays zeroed, terminating the table. */
+    RT_ZERO(aQuery);
+    aQuery[0].QueryRoutine = rtR0Nt3VerEnumCallback_CurrentVersion;
+    aQuery[0].Flags = 0;
+    aQuery[0].Name = L"CurrentVersion";
+    aQuery[0].EntryContext = NULL;
+    aQuery[0].DefaultType = REG_NONE;
+
+    aQuery[1].QueryRoutine = rtR0Nt3VerEnumCallback_CurrentBuildNumber;
+    aQuery[1].Flags = 0;
+    aQuery[1].Name = L"CurrentBuildNumber";
+    aQuery[1].EntryContext = NULL;
+    aQuery[1].DefaultType = REG_NONE;
+
+    aQuery[2].QueryRoutine = rtR0Nt3VerEnumCallback_CurrentType;
+    aQuery[2].Flags = 0;
+    aQuery[2].Name = L"CurrentType";
+    aQuery[2].EntryContext = NULL;
+    aQuery[2].DefaultType = REG_NONE;
+
+    /* Bit 0 = version, bit 1 = build number, bit 2 = type; set by the callbacks. */
+    uint32_t fFound = 0;
+    //NTSTATUS rcNt = RtlQueryRegistryValues(RTL_REGISTRY_WINDOWS_NT, NULL, &aQuery[0], &fFound, NULL /*Environment*/);
+    NTSTATUS rcNt = RtlQueryRegistryValues(RTL_REGISTRY_ABSOLUTE,
+                                           L"\\Registry\\Machine\\Software\\Microsoft\\Windows NT\\CurrentVersion",
+                                           &aQuery[0], &fFound, NULL /*Environment*/);
+    if (!NT_SUCCESS(rcNt))
+        RTLogBackdoorPrintf("rtR0Nt3InitVersion: RtlQueryRegistryValues failed: %#x\n", rcNt);
+    else if (fFound != (RT_BIT_32(0) | RT_BIT_32(1) | RT_BIT_32(2))) /* Was an unconditional 'else', complaining on full success too. */
+        RTLogBackdoorPrintf("rtR0Nt3InitVersion: Didn't get all values: fFound=%#x\n", fFound);
+
+    /*
+     * We really need the version number. Build, type and SMP is of less importance.
+     * Derive it from the NT kernel PE header.
+     */
+    if (!(fFound & RT_BIT_32(0)))
+    {
+        if (!g_fNt3ModuleInfoInitialized)
+            rtR0Nt3InitModuleInfo();
+
+        PIMAGE_DOS_HEADER   pMzHdr  = (PIMAGE_DOS_HEADER)g_pbNt3OsKrnl;
+        PIMAGE_NT_HEADERS32 pNtHdrs = (PIMAGE_NT_HEADERS32)&g_pbNt3OsKrnl[pMzHdr->e_lfanew];
+        if (pNtHdrs->OptionalHeader.MajorOperatingSystemVersion == 1)
+        {
+            /* NT 3.1 and NT 3.50 both set OS version to 1.0 in the optional header,
+               so use the linker version to tell them apart. */
+            g_uNt3MajorVer = 3;
+            if (   pNtHdrs->OptionalHeader.MajorLinkerVersion == 2
+                && pNtHdrs->OptionalHeader.MinorLinkerVersion < 50)
+                g_uNt3MinorVer = 10;
+            else
+                g_uNt3MinorVer = 50;
+        }
+        else
+        {
+            g_uNt3MajorVer = pNtHdrs->OptionalHeader.MajorOperatingSystemVersion;
+            g_uNt3MinorVer = pNtHdrs->OptionalHeader.MinorOperatingSystemVersion;
+        }
+        RTLogBackdoorPrintf("rtR0Nt3InitVersion: guessed %u.%u from PE header\n", g_uNt3MajorVer, g_uNt3MinorVer);
+
+        /* Check out the resource section, looking for VS_FIXEDFILEINFO. */
+        __try /* (pointless) */
+        {
+            PIMAGE_SECTION_HEADER paShdrs = (PIMAGE_SECTION_HEADER)(pNtHdrs + 1);
+            uint32_t const        cShdrs  = pNtHdrs->FileHeader.NumberOfSections;
+            uint32_t              iShdr   = 0;
+            while (iShdr < cShdrs && memcmp(paShdrs[iShdr].Name, ".rsrc", 6) != 0)
+                iShdr++;
+            if (iShdr < cShdrs)
+            {
+                if (   paShdrs[iShdr].VirtualAddress > 0
+                    && paShdrs[iShdr].VirtualAddress < pNtHdrs->OptionalHeader.SizeOfImage)
+                {
+                    /* Brute-force scan the section for the fixed file info signature. */
+                    uint32_t const  cbRsrc   = RT_MIN(  paShdrs[iShdr].Misc.VirtualSize
+                                                      ? paShdrs[iShdr].Misc.VirtualSize : paShdrs[iShdr].SizeOfRawData,
+                                                      pNtHdrs->OptionalHeader.SizeOfImage - paShdrs[iShdr].VirtualAddress);
+                    uint8_t const  *pbRsrc   = &g_pbNt3OsKrnl[paShdrs[iShdr].VirtualAddress];
+                    uint32_t const *puDwords = (uint32_t const *)pbRsrc;
+                    uint32_t        cDWords  = (cbRsrc - sizeof(VS_FIXEDFILEINFO) + sizeof(uint32_t)) / sizeof(uint32_t);
+                    while (cDWords-- > 0)
+                    {
+                        if (   puDwords[0] == VS_FFI_SIGNATURE
+                            && puDwords[1] == VS_FFI_STRUCVERSION)
+                        {
+                            VS_FIXEDFILEINFO const *pVerInfo = (VS_FIXEDFILEINFO const *)puDwords;
+                            g_uNt3MajorVer = pVerInfo->dwProductVersionMS >> 16;
+                            /* The minor version is the LOW word of dwProductVersionMS.
+                               (Was '>> 16' - a copy & paste bug making minor == major.) */
+                            g_uNt3MinorVer = pVerInfo->dwProductVersionMS & UINT32_C(0xffff);
+                            g_uNt3BuildNo  = pVerInfo->dwProductVersionLS >> 16;
+                            RTLogBackdoorPrintf("rtR0Nt3InitVersion: Found version info %u.%u build %u\n",
+                                                g_uNt3MajorVer, g_uNt3MinorVer, g_uNt3BuildNo);
+                            break;
+                        }
+                        puDwords++;
+                    }
+                }
+            }
+        }
+        __except(EXCEPTION_EXECUTE_HANDLER)
+        {
+            RTLogBackdoorPrintf("rtR0Nt3InitVersion: Exception scanning .rsrc section for version info!\n");
+        }
+    }
+
+    /*
+     * If we've got PsGetVersion, use it to override the above finding!
+     * (We may end up here for reasons other than the PsGetVersion fallback.)
+     */
+    if (g_pfnrtPsGetVersion)
+    {
+        WCHAR          wszCsd[64];
+        UNICODE_STRING UniStr;
+        UniStr.Buffer        = wszCsd;
+        UniStr.MaximumLength = sizeof(wszCsd) - sizeof(WCHAR);
+        UniStr.Length        = 0;
+        RT_ZERO(wszCsd);
+        ULONG   uMajor   = 3;
+        ULONG   uMinor   = 51;
+        ULONG   uBuildNo = 1057;
+        BOOLEAN fChecked = g_pfnrtPsGetVersion(&uMajor, &uMinor, &uBuildNo, &UniStr);
+
+        g_uNt3MajorVer = uMajor;
+        g_uNt3MinorVer = uMinor;
+        g_uNt3BuildNo  = uBuildNo;
+        g_fNt3Checked  = fChecked != FALSE;
+    }
+
+    g_fNt3VersionInitialized = true;
+}
+
+
+/**
+ * NT 3.x fallback for PsGetVersion, returning the lazily detected version.
+ *
+ * @returns Whether this is a checked build (g_fNt3Checked).
+ * @param   puMajor     Where to return the major version.  Optional.
+ * @param   puMinor     Where to return the minor version.  Optional.
+ * @param   puBuildNo   Where to return the build number.  Optional.
+ * @param   pCsdStr     Where to return the CSD string (always returned
+ *                      empty here).  Optional.
+ */
+extern "C" DECLEXPORT(BOOLEAN) __stdcall
+Nt3Fb_PsGetVersion(ULONG *puMajor, ULONG *puMinor, ULONG *puBuildNo, UNICODE_STRING *pCsdStr)
+{
+    if (!g_fNt3VersionInitialized)
+        rtR0Nt3InitVersion();
+    if (puMajor)
+        *puMajor = g_uNt3MajorVer;
+    if (puMinor)
+        *puMinor = g_uNt3MinorVer;
+    if (puBuildNo)
+        *puBuildNo = g_uNt3BuildNo;
+    if (pCsdStr)
+    {
+        /* Don't touch the buffer unless the caller actually supplied one
+           with room for the terminator (previously written unconditionally). */
+        if (pCsdStr->Buffer && pCsdStr->MaximumLength >= sizeof(WCHAR))
+            pCsdStr->Buffer[0] = '\0';
+        pCsdStr->Length = 0;
+    }
+    return g_fNt3Checked;
+}
+
+
+/**
+ * Worker for rtR0Nt3InitModuleInfo.
+ *
+ * Scans backwards in 4KB steps from an address inside the given image (an
+ * exported routine) until it finds the MZ/PE headers, thereby establishing
+ * the image base address and size.
+ *
+ * @returns true if the image was located (outputs updated), false otherwise
+ *          (outputs untouched).
+ * @param   pszImage    The image name (for logging only).
+ * @param   pbCode      An address within the image.
+ * @param   ppbModule   Where to return the image base address.
+ * @param   pcbModule   Where to return the image size (SizeOfImage).
+ */
+static bool rtR0Nt3InitModuleInfoOne(const char *pszImage, uint8_t const *pbCode, uint8_t **ppbModule, uint32_t *pcbModule)
+{
+    uintptr_t const uImageAlign = _4K; /* XP may put the kernel at */
+
+    /* Align pbCode. */
+    pbCode = (uint8_t const *)((uintptr_t)pbCode & ~(uintptr_t)(uImageAlign - 1));
+
+    /* Scan backwards till we find a PE signature. */
+    for (uint32_t cbChecked = 0; cbChecked < _64M; cbChecked += uImageAlign, pbCode -= uImageAlign)
+    {
+        if (!MmIsAddressValid((void *)pbCode))
+            continue;
+
+        uint32_t uZero     = 0;
+        uint32_t offNewHdr = 0;
+        __try /* pointless */
+        {
+            uZero     = *(uint32_t const *)pbCode;
+            offNewHdr = *(uint32_t const *)&pbCode[RT_UOFFSETOF(IMAGE_DOS_HEADER, e_lfanew)];
+        }
+        __except(EXCEPTION_EXECUTE_HANDLER)
+        {
+            RTLogBackdoorPrintf("rtR0Nt3InitModuleInfo: Exception at %p scanning for DOS header...\n", pbCode);
+            continue;
+        }
+        if (   (uint16_t)uZero == IMAGE_DOS_SIGNATURE
+            && offNewHdr < _2K
+            && offNewHdr >= sizeof(IMAGE_DOS_HEADER))
+        {
+            /* Looks like an MZ header; validate the NT headers it points to. */
+            RT_CONCAT(IMAGE_NT_HEADERS,ARCH_BITS) NtHdrs;
+            __try /* pointless */
+            {
+                NtHdrs = *(decltype(NtHdrs) const *)&pbCode[offNewHdr];
+            }
+            __except(EXCEPTION_EXECUTE_HANDLER)
+            {
+                RTLogBackdoorPrintf("rtR0Nt3InitModuleInfo: Exception at %p reading NT headers...\n", pbCode);
+                continue;
+            }
+            if (   NtHdrs.Signature == IMAGE_NT_SIGNATURE
+                && NtHdrs.FileHeader.SizeOfOptionalHeader == sizeof(NtHdrs.OptionalHeader)
+                && NtHdrs.FileHeader.NumberOfSections > 2
+                && NtHdrs.FileHeader.NumberOfSections < _4K
+                && NtHdrs.OptionalHeader.Magic == RT_CONCAT3(IMAGE_NT_OPTIONAL_HDR,ARCH_BITS,_MAGIC))
+            {
+                *ppbModule = (uint8_t *)pbCode;
+                *pcbModule = NtHdrs.OptionalHeader.SizeOfImage;
+                RTLogBackdoorPrintf("rtR0Nt3InitModuleInfo: Found %s at %#p LB %#x\n",
+                                    pszImage, pbCode, NtHdrs.OptionalHeader.SizeOfImage);
+                return true;
+            }
+        }
+    }
+    /* Was missing the pszImage argument for the %s, reading garbage off the stack. */
+    RTLogBackdoorPrintf("rtR0Nt3InitModuleInfo: Warning! Unable to locate %s...\n", pszImage);
+    return false;
+}
+
+
+/**
+ * Initializes the module information (NTOSKRNL + HAL) using exported symbols.
+ * This only works as long as no one is intercepting the symbols.
+ */
+static void rtR0Nt3InitModuleInfo(void)
+{
+    /* Use well-known exports as in-image starting points for the header scan.
+       NOTE(review): failures are ignored, leaving the g_pbNt3* defaults in
+       place - confirm consumers can cope with those fallback ranges. */
+    rtR0Nt3InitModuleInfoOne("ntoskrnl.exe", (uint8_t const *)(uintptr_t)IoGetCurrentProcess, &g_pbNt3OsKrnl, &g_cbNt3OsKrnl);
+    rtR0Nt3InitModuleInfoOne("hal.dll", (uint8_t const *)(uintptr_t)HalGetBusData, &g_pbNt3Hal, &g_cbNt3Hal);
+    g_fNt3ModuleInfoInitialized = true;
+}
+
+
+/**
+ * NT 3.x fallback for ZwQuerySystemInformation.
+ *
+ * Only implements SystemModuleInformation, faking a two-entry module list
+ * (ntoskrnl.exe + hal.dll) from the lazily initialized g_pbNt3OsKrnl /
+ * g_pbNt3Hal module information.
+ *
+ * @returns STATUS_SUCCESS, STATUS_INFO_LENGTH_MISMATCH when cbBuf is too
+ *          small (*pcbActual holds the required size), or
+ *          STATUS_INVALID_INFO_CLASS for unsupported classes.
+ * @param   enmClass    The information class being queried.
+ * @param   pvBuf       The output buffer.
+ * @param   cbBuf       Size of the output buffer in bytes.
+ * @param   pcbActual   Where to return the required/used size.  Optional.
+ */
+extern "C" DECLEXPORT(NTSTATUS) __stdcall
+Nt3Fb_ZwQuerySystemInformation(SYSTEM_INFORMATION_CLASS enmClass, PVOID pvBuf, ULONG cbBuf, PULONG pcbActual)
+{
+    switch (enmClass)
+    {
+        case SystemModuleInformation:
+        {
+            PRTL_PROCESS_MODULES pInfo = (PRTL_PROCESS_MODULES)pvBuf;
+            ULONG cbNeeded = RT_UOFFSETOF(RTL_PROCESS_MODULES, Modules[2]);
+            if (pcbActual)
+                *pcbActual = cbNeeded;
+            if (cbBuf < cbNeeded)
+                return STATUS_INFO_LENGTH_MISMATCH;
+
+            if (!g_fNt3ModuleInfoInitialized)
+                rtR0Nt3InitModuleInfo();
+
+            pInfo->NumberOfModules = 2;
+
+            /* ntoskrnl.exe */
+            pInfo->Modules[0].Section = NULL;
+            pInfo->Modules[0].MappedBase = g_pbNt3OsKrnl;
+            pInfo->Modules[0].ImageBase = g_pbNt3OsKrnl;
+            pInfo->Modules[0].ImageSize = g_cbNt3OsKrnl;
+            pInfo->Modules[0].Flags = 0;
+            pInfo->Modules[0].LoadOrderIndex = 0;
+            pInfo->Modules[0].InitOrderIndex = 0;
+            pInfo->Modules[0].LoadCount = 1024;
+            /* OffsetToFileName points at the filename part within FullPathName. */
+            pInfo->Modules[0].OffsetToFileName = sizeof("\\SystemRoot\\System32\\") - 1;
+            memcpy(pInfo->Modules[0].FullPathName, RT_STR_TUPLE("\\SystemRoot\\System32\\ntoskrnl.exe"));
+
+            /* hal.dll */
+            pInfo->Modules[1].Section = NULL;
+            pInfo->Modules[1].MappedBase = g_pbNt3Hal;
+            pInfo->Modules[1].ImageBase = g_pbNt3Hal;
+            pInfo->Modules[1].ImageSize = g_cbNt3Hal;
+            pInfo->Modules[1].Flags = 0;
+            pInfo->Modules[1].LoadOrderIndex = 1;
+            pInfo->Modules[1].InitOrderIndex = 0;
+            pInfo->Modules[1].LoadCount = 1024;
+            pInfo->Modules[1].OffsetToFileName = sizeof("\\SystemRoot\\System32\\") - 1;
+            memcpy(pInfo->Modules[1].FullPathName, RT_STR_TUPLE("\\SystemRoot\\System32\\hal.dll"));
+
+            return STATUS_SUCCESS;
+        }
+
+        default:
+            return STATUS_INVALID_INFO_CLASS;
+    }
+}
+
+/**
+ * Calculates the length indicated by an ModR/M sequence.
+ *
+ * Used by rtR0Nt3InitSymbols when scanning KeQueryTickCount; only the
+ * instruction forms occurring there need to decode correctly.
+ *
+ * @returns Length, including RM byte.
+ * @param   bRm         The RM byte.
+ */
+static uint32_t rtR0Nt3CalcModRmLength(uint8_t bRm)
+{
+    uint32_t cbRm = 1;
+
+    /* NOTE(review): mod=3 (register direct) has no displacement, yet it is
+       lumped together with the mod=0/rm=5 disp32 case here; and in 32-bit
+       addressing mod=2 means disp32, not disp16.  This appears harmless for
+       the specific mov/ret sequence scanned by rtR0Nt3InitSymbols, but
+       confirm before reusing this helper elsewhere. */
+    if (   (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)
+        || (bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
+        cbRm += 4; /* disp32 */
+    else if ((bRm & X86_MODRM_MOD_MASK) == (1 << X86_MODRM_MOD_SHIFT))
+        cbRm += 1; /* disp8 */
+    else if ((bRm & X86_MODRM_MOD_MASK) == (2 << X86_MODRM_MOD_SHIFT))
+        cbRm += 2; /* disp16 */
+
+    /* rm=4 outside register-direct mode indicates a trailing SIB byte. */
+    if ((bRm & X86_MODRM_RM_MASK) == 4 && (bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
+        cbRm += 1; /* SIB */
+
+    return cbRm;
+}
+
+
+/**
+ * Init symbols.
+ *
+ * This is called after both ZwQuerySystemInformation and PsGetVersion are used
+ * for the first time.
+ *
+ * @returns IPRT status code
+ * @param   hKrnlInfo       Kernel symbol digger handle.
+ */
+DECLHIDDEN(int) rtR0Nt3InitSymbols(RTDBGKRNLINFO hKrnlInfo)
+{
+    /*
+     * Resolve symbols.  (We set C variables (g_pfnrtXxx) here, not the __imp__Xxx ones.)
+     */
+#define GET_SYSTEM_ROUTINE(a_fnName) do { \
+        RT_CONCAT(g_pfnrt, a_fnName) = (decltype(RT_CONCAT(g_pfnrt, a_fnName)))RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, #a_fnName); \
+    } while (0)
+
+    GET_SYSTEM_ROUTINE(PsGetVersion);
+    GET_SYSTEM_ROUTINE(ZwQuerySystemInformation);
+    GET_SYSTEM_ROUTINE(KeSetTimerEx);
+    GET_SYSTEM_ROUTINE(IoAttachDeviceToDeviceStack);
+    GET_SYSTEM_ROUTINE(PsGetCurrentProcessId);
+    GET_SYSTEM_ROUTINE(ZwYieldExecution);
+    GET_SYSTEM_ROUTINE(ExAcquireFastMutex);
+    GET_SYSTEM_ROUTINE(ExReleaseFastMutex);
+
+    /* For the fastcall KPIs at least one of the fastcall/stdcall variants must resolve. */
+#define GET_FAST_CALL_SYSTEM_ROUTINE(a_fnFastcall, a_fnStdcall) do { \
+        GET_SYSTEM_ROUTINE(a_fnFastcall); \
+        GET_SYSTEM_ROUTINE(a_fnStdcall); \
+        AssertLogRelReturn(RT_CONCAT(g_pfnrt,a_fnFastcall) || RT_CONCAT(g_pfnrt,a_fnStdcall), VERR_INTERNAL_ERROR_3); \
+    } while (0)
+    GET_FAST_CALL_SYSTEM_ROUTINE(IofCompleteRequest, IoCompleteRequest);
+    GET_FAST_CALL_SYSTEM_ROUTINE(ObfDereferenceObject, ObDereferenceObject);
+    GET_FAST_CALL_SYSTEM_ROUTINE(IofCallDriver, IoCallDriver);
+    GET_FAST_CALL_SYSTEM_ROUTINE(KfAcquireSpinLock, KeAcquireSpinLock);
+    GET_FAST_CALL_SYSTEM_ROUTINE(KfReleaseSpinLock, KeReleaseSpinLock);
+    GET_FAST_CALL_SYSTEM_ROUTINE(KfLowerIrql, KeLowerIrql);
+    GET_FAST_CALL_SYSTEM_ROUTINE(KfRaiseIrql, KeRaiseIrql);
+    GET_FAST_CALL_SYSTEM_ROUTINE(KefAcquireSpinLockAtDpcLevel, KeAcquireSpinLockAtDpcLevel);
+    GET_FAST_CALL_SYSTEM_ROUTINE(KefReleaseSpinLockFromDpcLevel, KeReleaseSpinLockFromDpcLevel);
+
+    /*
+     * We need to call assembly to update the __imp__Xxx entries, since C
+     * doesn't allow '@' in symbols.
+     */
+    rtNt3InitSymbolsAssembly();
+
+    /*
+     * Tick count data. We disassemble KeQueryTickCount until we find the
+     * first absolute address referenced in it.
+     *     %80105b70 8b 44 24 04             mov     eax, dword [esp+004h]
+     *     %80105b74 c7 40 04 00 00 00 00    mov     dword [eax+004h], 000000000h
+     *     %80105b7b 8b 0d 88 70 19 80       mov     ecx, dword [080197088h]
+     *     %80105b81 89 08                   mov     dword [eax], ecx
+     *     %80105b83 c2 04 00                retn    00004h
+     */
+    _imp__KeTickCount = (decltype(_imp__KeTickCount))RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, "KeTickCount");
+    if (!_imp__KeTickCount)
+    {
+        if (!g_fNt3VersionInitialized)
+            rtR0Nt3InitVersion();
+        Assert(g_uNt3MajorVer == 3 && g_uNt3MinorVer < 50);
+
+        uint8_t const *pbCode = (uint8_t const *)RTR0DbgKrnlInfoGetSymbol(hKrnlInfo, NULL, "KeQueryTickCount");
+        AssertLogRelReturn(pbCode, VERR_INTERNAL_ERROR_2);
+
+        for (uint32_t off = 0; off < 128 && _imp__KeTickCount == NULL;)
+        {
+            uint8_t const b1 = pbCode[off++];
+            switch (b1)
+            {
+                case 0x8b: /* mov reg, r/m ; We're looking for absolute address in r/m. */
+                    if ((pbCode[off] & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5 /*disp32*/)
+                        _imp__KeTickCount = *(KSYSTEM_TIME **)&pbCode[off + 1];
+                    RT_FALL_THRU();
+                case 0x89: /* mov r/m, reg */
+                    off += rtR0Nt3CalcModRmLength(pbCode[off]);
+                    break;
+
+                case 0xc7:
+                    if ((pbCode[off] & X86_MODRM_REG_MASK) == 0) /* mov r/m, imm32 */
+                        off += rtR0Nt3CalcModRmLength(pbCode[off]) + 4;
+                    else
+                    {
+                        RTLogBackdoorPrintf("rtR0Nt3InitSymbols: Failed to find KeTickCount! Encountered unknown opcode at %#x! %.*Rhxs\n",
+                                            off - 1, RT_MAX(off + 16, RT_MIN(PAGE_SIZE - ((uintptr_t)pbCode & PAGE_OFFSET_MASK), 128)), pbCode);
+                        return VERR_INTERNAL_ERROR_3;
+                    }
+                    break;
+
+                case 0xc2: /* ret iw */
+                    RTLogBackdoorPrintf("rtR0Nt3InitSymbols: Failed to find KeTickCount! Encountered RET! %.*Rhxs\n",
+                                        off + 2, pbCode);
+                    return VERR_INTERNAL_ERROR_3;
+
+                default:
+                    RTLogBackdoorPrintf("rtR0Nt3InitSymbols: Failed to find KeTickCount! Encountered unknown opcode at %#x! %.*Rhxs\n",
+                                        off - 1, RT_MAX(off + 16, RT_MIN(PAGE_SIZE - ((uintptr_t)pbCode & PAGE_OFFSET_MASK), 128)), pbCode);
+                    return VERR_INTERNAL_ERROR_3;
+
+                /* Just in case: */
+
+                case 0xa1: /* mov eax, [m32] */
+                    _imp__KeTickCount = *(KSYSTEM_TIME **)&pbCode[off];
+                    off += 4;
+                    break;
+
+                /* push reg - opcodes 0x50..0x57.  (These were mistakenly written as
+                   decimal 50..57 (= 0x32..0x39), so an actual push instruction would
+                   have hit the default case and failed the whole scan.) */
+                case 0x50: case 0x51: case 0x52: case 0x53:
+                case 0x54: case 0x55: case 0x56: case 0x57:
+                    break;
+            }
+        }
+        if (!_imp__KeTickCount)
+        {
+            RTLogBackdoorPrintf("rtR0Nt3InitSymbols: Failed to find KeTickCount after 128 bytes! %.*Rhxs\n", 128, pbCode);
+            return VERR_INTERNAL_ERROR_3;
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * NT 3.1 fallback for KeInitializeTimerEx.
+ *
+ * @param   pTimer      The timer to initialize.
+ * @param   enmType     The timer type - currently ignored, see todo.
+ */
+extern "C" DECLEXPORT(VOID)
+Nt3Fb_KeInitializeTimerEx(PKTIMER pTimer, TIMER_TYPE enmType)
+{
+    KeInitializeTimer(pTimer);
+    NOREF(enmType);
+    /** @todo Default is NotificationTimer, for SynchronizationTimer we need to
+     *        do more work. timer-r0drv-nt.cpp is using the latter. :/ */
+}
+
+
+/**
+ * NT 3.1 fallback for KeSetTimerEx - one-shot timers only.
+ *
+ * @returns KeSetTimer's return value (TRUE if the timer was already queued),
+ *          or FALSE when a periodic interval is requested.
+ * @param   pTimer      The timer.
+ * @param   DueTime     The expiration time.
+ * @param   cMsPeriod   Periodic interval in milliseconds; asserted to be 0
+ *                      since plain KeSetTimer cannot do periodic timers.
+ * @param   pDpc        Optional DPC to queue on expiry.
+ */
+extern "C" DECLEXPORT(BOOLEAN) __stdcall
+Nt3Fb_KeSetTimerEx(PKTIMER pTimer, LARGE_INTEGER DueTime, LONG cMsPeriod, PKDPC pDpc)
+{
+    AssertReturn(cMsPeriod == 0, FALSE);
+    return KeSetTimer(pTimer, DueTime, pDpc);
+}
+
+
+/**
+ * NT 3.x fallback for IoAttachDeviceToDeviceStack - not supported.
+ *
+ * @returns NULL (attach failure) in all cases; callers must cope with this.
+ * @param   pSourceDevice   The device to attach (ignored).
+ * @param   pTargetDevice   The device stack to attach to (ignored).
+ */
+extern "C" DECLEXPORT(PDEVICE_OBJECT)
+Nt3Fb_IoAttachDeviceToDeviceStack(PDEVICE_OBJECT pSourceDevice, PDEVICE_OBJECT pTargetDevice)
+{
+    NOREF(pSourceDevice); NOREF(pTargetDevice);
+    return NULL;
+}
+
+
+/**
+ * NT 3.x fallback for PsGetCurrentProcessId.
+ *
+ * Digs the process id straight out of the current process structure since
+ * the KPI is not exported here.
+ *
+ * @returns The current process id.
+ */
+extern "C" DECLEXPORT(HANDLE)
+Nt3Fb_PsGetCurrentProcessId(void)
+{
+    if (!g_fNt3VersionInitialized)
+        rtR0Nt3InitVersion();
+
+    /* The field offset differs between NT 3.1 and NT 3.50+.
+       NOTE(review): the 0x94 / 0xb0 offsets are hard-coded - verify against
+       the corresponding kernel structure layouts. */
+    uint8_t const  *pbProcess = (uint8_t const *)IoGetCurrentProcess();
+    uintptr_t const offPid    =    g_uNt3MajorVer > 3
+                                || g_uNt3MinorVer >= 50 ? 0x94 : 0xb0;
+    return *(HANDLE const *)&pbProcess[offPid];
+}
+
+
+/**
+ * NT 3.x fallback for ZwYieldExecution - yields by sleeping for zero time.
+ *
+ * @returns STATUS_SUCCESS (always).
+ */
+extern "C" DECLEXPORT(NTSTATUS)
+Nt3Fb_ZwYieldExecution(VOID)
+{
+    LARGE_INTEGER ZeroInterval;
+    ZeroInterval.QuadPart = 0;
+    KeDelayExecutionThread(KernelMode, FALSE, &ZeroInterval);
+    return STATUS_SUCCESS;
+}
+
+
+/**
+ * This is a simple implementation of the fast mutex api introduced in 3.50.
+ *
+ * Acquires the mutex, first raising to APC_LEVEL, blocking on the mutex's
+ * event when contended.  Must be paired with Nt3Fb_ExReleaseFastMutex.
+ *
+ * @param   pFastMtx    The fast mutex.  Assumed initialized the usual way
+ *                      (Count = 1, Event ready) - TODO confirm against
+ *                      ExInitializeFastMutex on the targeted NT versions.
+ */
+extern "C" DECLEXPORT(VOID) FASTCALL
+Nt3Fb_ExAcquireFastMutex(PFAST_MUTEX pFastMtx)
+{
+    PETHREAD pSelf = PsGetCurrentThread();
+    KIRQL OldIrql;
+    KeRaiseIrql(APC_LEVEL, &OldIrql);
+
+    /* The Count member is initialized to 1. So if we decrement it to zero, we're
+       the first locker and owns the mutex. Otherwise we must wait for our turn. */
+    int32_t cLockers = ASMAtomicDecS32((int32_t volatile *)&pFastMtx->Count);
+    if (cLockers != 0)
+    {
+        ASMAtomicIncU32((uint32_t volatile *)&pFastMtx->Contention);
+        KeWaitForSingleObject(&pFastMtx->Event, Executive, KernelMode, FALSE /*fAlertable*/, NULL /*pTimeout*/);
+    }
+
+    /* We own it now; record owner and the IRQL to restore on release. */
+    pFastMtx->Owner = (PKTHREAD)pSelf;
+    pFastMtx->OldIrql = OldIrql;
+}
+
+
+/**
+ * This is a simple implementation of the fast mutex api introduced in 3.50.
+ *
+ * Releases a fast mutex acquired by Nt3Fb_ExAcquireFastMutex, waking one
+ * waiter (if any) and restoring the IRQL saved at acquisition time.
+ *
+ * @param   pFastMtx    The fast mutex.  Must be owned by the calling thread.
+ */
+extern "C" DECLEXPORT(VOID) FASTCALL
+Nt3Fb_ExReleaseFastMutex(PFAST_MUTEX pFastMtx)
+{
+    AssertMsg(pFastMtx->Owner == (PKTHREAD)PsGetCurrentThread(), ("Owner=%p, expected %p\n", pFastMtx->Owner, PsGetCurrentThread()));
+
+    KIRQL OldIrql = pFastMtx->OldIrql;
+    pFastMtx->Owner = NULL;
+    int32_t cLockers = ASMAtomicIncS32((int32_t volatile *)&pFastMtx->Count);
+    /* Count is 1 when free and goes negative with waiters, so a post-increment
+       result of <= 0 means at least one thread is waiting on the event.
+       (Was '< 0', which failed to wake the last/single waiter: with exactly
+       one waiter Count is -1 here and the increment yields 0 - deadlock.) */
+    if (cLockers <= 0)
+        KeSetEvent(&pFastMtx->Event, EVENT_INCREMENT, FALSE /*fWait*/);
+    if (OldIrql != APC_LEVEL)
+        KeLowerIrql(OldIrql);
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/nt3fakes-stub-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/nt3fakes-stub-r0drv-nt.cpp
new file mode 100644
index 00000000..bd6bfa10
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/nt3fakes-stub-r0drv-nt.cpp
@@ -0,0 +1,42 @@
+/* $Id: nt3fakes-stub-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - NT 3.x fakes for NT 4.0+ KPIs, init stub.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+#include <iprt/errcore.h>
+#include <iprt/dbg.h>
+#include "internal-r0drv-nt.h"
+
+
+/**
+ * No-op stand-in for the NT 3.x symbol-init on builds that don't need the
+ * NT 3.x fakes.
+ *
+ * @returns VINF_SUCCESS (always).
+ * @param   hKrnlInfo   Kernel symbol digger handle (unused).
+ */
+int rtR0Nt3InitSymbols(RTDBGKRNLINFO hKrnlInfo)
+{
+    NOREF(hKrnlInfo);
+    return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/nt3fakesA-r0drv-nt.asm b/src/VBox/Runtime/r0drv/nt/nt3fakesA-r0drv-nt.asm
new file mode 100644
index 00000000..4ef552ad
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/nt3fakesA-r0drv-nt.asm
@@ -0,0 +1,147 @@
+; $Id: nt3fakesA-r0drv-nt.asm $
+;; @file
+; IPRT - Companion to nt3fakes-r0drv-nt.cpp that provides import stuff to satisfy the linker.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+; The contents of this file may alternatively be used under the terms
+; of the Common Development and Distribution License Version 1.0
+; (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+; VirtualBox OSE distribution, in which case the provisions of the
+; CDDL are applicable instead of those of the GPL.
+;
+; You may elect to license modified versions of this file under the
+; terms and conditions of either the GPL or the CDDL or both.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "iprt/asmdefs.mac"
+
+%undef NAME
+%define NAME(name) NAME_OVERLOAD(name)
+
+BEGINCODE
+
+;;
+; Called from rtR0Nt3InitSymbols after symbols have been resolved.
+BEGINPROC _rtNt3InitSymbolsAssembly
+ push ebp
+ mov ebp, esp
+
+;;
+; @param 1 The symbol decoration prefix ('_' for stdcall/cdecl, '@' for fastcall).
+; @param 2 The function name.  @param 3 Byte size of arguments for the @N suffix.
+%macro DefineImportDataAndInitCode 3
+extern $%1 %+ Nt3Fb_ %+ %2 %+ @ %+ %3
+BEGINDATA
+extern _g_pfnrt %+ %2
+GLOBALNAME __imp_ %+ %1 %+ %2 %+ @ %+ %3
+ dd $%1 %+ Nt3Fb_ %+ %2 %+ @ %+ %3
+BEGINCODE
+ mov eax, [_g_pfnrt %+ %2]
+ test eax, eax
+ jz %%next
+ mov [__imp_ %+ %1 %+ %2 %+ @ %+ %3], eax
+%%next:
+%endmacro
+
+ DefineImportDataAndInitCode _,PsGetVersion, 16
+ DefineImportDataAndInitCode _,ZwQuerySystemInformation, 16
+ DefineImportDataAndInitCode _,KeSetTimerEx, 20
+ DefineImportDataAndInitCode _,IoAttachDeviceToDeviceStack, 8
+ DefineImportDataAndInitCode _,PsGetCurrentProcessId, 0
+ DefineImportDataAndInitCode _,ZwYieldExecution, 0
+ DefineImportDataAndInitCode @,ExAcquireFastMutex, 4
+ DefineImportDataAndInitCode @,ExReleaseFastMutex, 4
+
+ xor eax, eax
+ leave
+ ret
+ENDPROC _rtNt3InitSymbolsAssembly
+
+
+;;
+; @param 1 The fastcall name.
+; @param 2 The stdcall name.
+; @param 3 Byte size of arguments.
+; @param 4 Zero if 1:1 mapping;
+; One if 2nd parameter is a byte pointer that the farcall version
+; instead returns in al.
+%macro FastOrStdCallWrapper 4
+BEGINCODE
+extern _g_pfnrt %+ %1
+extern _g_pfnrt %+ %2
+BEGINPROC_EXPORTED $@ %+ %1 %+ @ %+ %3
+ mov eax, [_g_pfnrt %+ %1]
+ cmp eax, 0
+ jnz .got_fast_call
+ mov eax, .stdcall_wrapper
+ mov [__imp_@ %+ %1 %+ @ %+ %3], eax
+
+.stdcall_wrapper:
+ push ebp
+ mov ebp, esp
+%if %4 == 1
+ push dword 0
+ push esp
+%else
+ push edx
+%endif
+ push ecx
+ call [_g_pfnrt %+ %2]
+%if %4 == 1
+ movzx eax, byte [ebp - 4]
+%endif
+ leave
+ ret
+
+.got_fast_call:
+ mov [__imp_@ %+ %1 %+ @ %+ %3], eax
+ jmp eax
+ENDPROC $@ %+ %1 %+ @ %+ %3
+
+BEGINDATA
+GLOBALNAME __imp_@ %+ %1 %+ @ %+ %3
+ dd $@ %+ %1 %+ @ %+ %3
+%endmacro
+
+FastOrStdCallWrapper IofCompleteRequest, IoCompleteRequest, 8, 0
+FastOrStdCallWrapper IofCallDriver, IoCallDriver, 8, 0
+FastOrStdCallWrapper ObfDereferenceObject, ObDereferenceObject, 4, 0
+FastOrStdCallWrapper KfAcquireSpinLock, KeAcquireSpinLock, 4, 1
+FastOrStdCallWrapper KfReleaseSpinLock, KeReleaseSpinLock, 8, 0
+FastOrStdCallWrapper KfRaiseIrql, KeRaiseIrql, 4, 1
+FastOrStdCallWrapper KfLowerIrql, KeLowerIrql, 4, 0
+FastOrStdCallWrapper KefAcquireSpinLockAtDpcLevel, KeAcquireSpinLockAtDpcLevel, 4, 0
+FastOrStdCallWrapper KefReleaseSpinLockFromDpcLevel,KeReleaseSpinLockFromDpcLevel, 4, 0
+
+
+BEGINCODE
+; LONG FASTCALL InterlockedExchange(LONG volatile *,LONG );
+BEGINPROC_EXPORTED $@InterlockedExchange@8
+ mov eax, edx
+ xchg [ecx], eax
+ ret
+
+BEGINDATA
+GLOBALNAME __imp_@InterlockedExchange@8
+ dd $@InterlockedExchange@8
+
+
+BEGINDATA
+GLOBALNAME __imp__KeTickCount
+GLOBALNAME _KeTickCount
+ dd 0
+
diff --git a/src/VBox/Runtime/r0drv/nt/ntBldSymDb.cpp b/src/VBox/Runtime/r0drv/nt/ntBldSymDb.cpp
new file mode 100644
index 00000000..c01254e8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/ntBldSymDb.cpp
@@ -0,0 +1,1212 @@
+/* $Id: ntBldSymDb.cpp $ */
+/** @file
+ * IPRT - Build program for the NT kernel symbol database (symdbdata.h).
+ */
+
+/*
+ * Copyright (C) 2013-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/win/windows.h>
+#include <Dbghelp.h>
+
+#include <iprt/alloca.h>
+#include <iprt/dir.h>
+#include <iprt/file.h>
+#include <iprt/err.h>
+#include <iprt/getopt.h>
+#include <iprt/initterm.h>
+#include <iprt/list.h>
+#include <iprt/mem.h>
+#include <iprt/message.h>
+#include <iprt/path.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/utf16.h>
+
+#include "r0drv/nt/symdb.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** A structure member we're interested in. */
+typedef struct MYMEMBER
+{
+ /** The member name. */
+ const char * const pszName;
+ /** Reserved. */
+ uint32_t const fFlags;
+ /** The offset of the member. UINT32_MAX if not found. */
+ uint32_t off;
+ /** The size of the member. */
+ uint32_t cb;
+ /** Alternative names, optional.
+ * This is a string of zero terminated strings, ending with an zero length
+ * string (or double '\\0' if you like). */
+ const char * const pszzAltNames;
+} MYMEMBER;
+/** Pointer to a member we're interested. */
+typedef MYMEMBER *PMYMEMBER;
+
+/** Members we're interested in. */
+typedef struct MYSTRUCT
+{
+ /** The structure name. */
+ const char * const pszName;
+ /** Array of members we're interested in. */
+ MYMEMBER *paMembers;
+ /** The number of members we're interested in. */
+ uint32_t const cMembers;
+ /** Reserved. */
+ uint32_t const fFlags;
+} MYSTRUCT;
+
+/** Architecture. */
+typedef enum MYARCH
+{
+ MYARCH_X86,
+ MYARCH_AMD64,
+ MYARCH_DETECT
+} MYARCH;
+
+/** Set of structures for one kernel. */
+typedef struct MYSET
+{
+ /** The list entry. */
+ RTLISTNODE ListEntry;
+ /** The source PDB. */
+ char *pszPdb;
+ /** The OS version we've harvested structs for */
+ RTNTSDBOSVER OsVerInfo;
+ /** The architecture. */
+ MYARCH enmArch;
+ /** The structures and their member. */
+ MYSTRUCT aStructs[1];
+} MYSET;
+/** Pointer a set of structures for one kernel. */
+typedef MYSET *PMYSET;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Verbosity level (-v, --verbose). */
+static uint32_t g_iOptVerbose = 1;
+/** Set if we should force ahead despite errors. */
+static bool g_fOptForce = false;
+
+/** The members of the KPRCB structure that we're interested in. */
+static MYMEMBER g_aKprcbMembers[] =
+{
+ { "QuantumEnd", 0, UINT32_MAX, UINT32_MAX, NULL },
+ { "DpcQueueDepth", 0, UINT32_MAX, UINT32_MAX, "DpcData[0].DpcQueueDepth\0" },
+ { "VendorString", 0, UINT32_MAX, UINT32_MAX, NULL },
+};
+
+/** The structures we're interested in. */
+static MYSTRUCT g_aStructs[] =
+{
+ { "_KPRCB", &g_aKprcbMembers[0], RT_ELEMENTS(g_aKprcbMembers), 0 },
+};
+
+/** List of data we've found. This is sorted by version info. */
+static RTLISTANCHOR g_SetList;
+
+
+
+
+
+/**
+ * For debug/verbose output.
+ *
+ * @param pszFormat The format string.
+ * @param ... The arguments referenced in the format string.
+ */
+static void MyDbgPrintf(const char *pszFormat, ...)
+{
+ if (g_iOptVerbose > 1)
+ {
+ va_list va;
+ va_start(va, pszFormat);
+ RTPrintf("debug: ");
+ RTPrintfV(pszFormat, va);
+ va_end(va);
+ }
+}
+
+
+/**
+ * Returns the name we wish to use in the C code.
+ * @returns Structure name.
+ * @param pStruct The structure descriptor.
+ */
+static const char *figureCStructName(MYSTRUCT const *pStruct)
+{
+ const char *psz = pStruct->pszName;
+ while (*psz == '_')
+ psz++;
+ return psz;
+}
+
+
+/**
+ * Returns the name we wish to use in the C code.
+ * @returns Member name.
+ * @param pMember The member descriptor.
+ */
+static const char *figureCMemberName(MYMEMBER const *pMember)
+{
+ return pMember->pszName;
+}
+
+
+/**
+ * Generates the symdbdata.h header: the RTNTSDBTYPE_* structure typedefs,
+ * the RTNTSDBSET type, and the g_artNtSdbSets data table built from the
+ * sets collected in g_SetList.
+ *
+ * @param pOut The output stream.
+static void generateHeader(PRTSTREAM pOut)
+{
+ RTStrmPrintf(pOut,
+ "/* $" "I" "d" ": $ */\n" /* avoid it being expanded */
+ "/** @file\n"
+ " * IPRT - NT kernel type helpers - Autogenerated, do NOT edit.\n"
+ " */\n"
+ "\n"
+ "/*\n"
+ " * Copyright (C) 2013-2017 Oracle Corporation \n"
+ " *\n"
+ " * This file is part of VirtualBox Open Source Edition (OSE), as\n"
+ " * available from http://www.virtualbox.org. This file is free software;\n"
+ " * you can redistribute it and/or modify it under the terms of the GNU\n"
+ " * General Public License (GPL) as published by the Free Software\n"
+ " * Foundation, in version 2 as it comes in the \"COPYING\" file of the\n"
+ " * VirtualBox OSE distribution. VirtualBox OSE is distributed in the\n"
+ " * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.\n"
+ " *\n"
+ " * The contents of this file may alternatively be used under the terms\n"
+ " * of the Common Development and Distribution License Version 1.0\n"
+ " * (CDDL) only, as it comes in the \"COPYING.CDDL\" file of the\n"
+ " * VirtualBox OSE distribution, in which case the provisions of the\n"
+ " * CDDL are applicable instead of those of the GPL.\n"
+ " *\n"
+ " * You may elect to license modified versions of this file under the\n"
+ " * terms and conditions of either the GPL or the CDDL or both.\n"
+ " */\n"
+ "\n"
+ "\n"
+ "#ifndef IPRT_INCLUDED_SRC_nt_symdbdata_h\n"
+ "#define IPRT_INCLUDED_SRC_nt_symdbdata_h\n"
+ "\n"
+ "#include \"r0drv/nt/symdb.h\"\n"
+ "\n"
+ );
+
+ /*
+ * Generate types.
+ */
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aStructs); i++)
+ {
+ const char *pszStructName = figureCStructName(&g_aStructs[i]);
+
+ RTStrmPrintf(pOut,
+ "typedef struct RTNTSDBTYPE_%s\n"
+ "{\n",
+ pszStructName);
+ PMYMEMBER paMembers = g_aStructs[i].paMembers;
+        for (uint32_t j = 0; j < g_aStructs[i].cMembers; j++)
+ {
+ const char *pszMemName = figureCMemberName(&paMembers[j]);
+ RTStrmPrintf(pOut,
+ " uint32_t off%s;\n"
+ " uint32_t cb%s;\n",
+ pszMemName, pszMemName);
+ }
+
+ RTStrmPrintf(pOut,
+ "} RTNTSDBTYPE_%s;\n"
+ "\n",
+ pszStructName);
+ }
+
+ RTStrmPrintf(pOut,
+ "\n"
+ "typedef struct RTNTSDBSET\n"
+ "{\n"
+ " RTNTSDBOSVER%-20s OsVerInfo;\n", "");
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aStructs); i++)
+ {
+ const char *pszStructName = figureCStructName(&g_aStructs[i]);
+ RTStrmPrintf(pOut, " RTNTSDBTYPE_%-20s %s;\n", pszStructName, pszStructName);
+ }
+ RTStrmPrintf(pOut,
+ "} RTNTSDBSET;\n"
+ "typedef RTNTSDBSET const *PCRTNTSDBSET;\n"
+ "\n");
+
+ /*
+ * Output the data.
+ */
+ RTStrmPrintf(pOut,
+ "\n"
+ "#ifndef RTNTSDB_NO_DATA\n"
+ "const RTNTSDBSET g_artNtSdbSets[] = \n"
+ "{\n");
+ PMYSET pSet;
+ RTListForEach(&g_SetList, pSet, MYSET, ListEntry)
+ {
+ const char *pszArch = pSet->enmArch == MYARCH_AMD64 ? "AMD64" : "X86";
+ RTStrmPrintf(pOut,
+ "# ifdef RT_ARCH_%s\n"
+ " { /* Source: %s */\n"
+ " /*.OsVerInfo = */\n"
+ " {\n"
+ " /* .uMajorVer = */ %u,\n"
+ " /* .uMinorVer = */ %u,\n"
+ " /* .fChecked = */ %s,\n"
+ " /* .fSmp = */ %s,\n"
+ " /* .uCsdNo = */ %u,\n"
+ " /* .uBuildNo = */ %u,\n"
+ " },\n",
+ pszArch,
+ pSet->pszPdb,
+ pSet->OsVerInfo.uMajorVer,
+ pSet->OsVerInfo.uMinorVer,
+ pSet->OsVerInfo.fChecked ? "true" : "false",
+ pSet->OsVerInfo.fSmp ? "true" : "false",
+ pSet->OsVerInfo.uCsdNo,
+ pSet->OsVerInfo.uBuildNo);
+ for (uint32_t i = 0; i < RT_ELEMENTS(pSet->aStructs); i++)
+ {
+ const char *pszStructName = figureCStructName(&pSet->aStructs[i]);
+ RTStrmPrintf(pOut,
+ " /* .%s = */\n"
+ " {\n", pszStructName);
+ PMYMEMBER paMembers = pSet->aStructs[i].paMembers;
+ for (uint32_t j = 0; j < pSet->aStructs[i].cMembers; j++)
+ {
+ const char *pszMemName = figureCMemberName(&paMembers[j]);
+ RTStrmPrintf(pOut,
+ " /* .off%-25s = */ %#06x,\n"
+ " /* .cb%-26s = */ %#06x,\n",
+ pszMemName, paMembers[j].off,
+ pszMemName, paMembers[j].cb);
+ }
+ RTStrmPrintf(pOut,
+ " },\n");
+ }
+ RTStrmPrintf(pOut,
+ " },\n"
+ "# endif\n"
+ );
+ }
+
+ RTStrmPrintf(pOut,
+ "};\n"
+ "#endif /* !RTNTSDB_NO_DATA */\n"
+ "\n");
+
+ RTStrmPrintf(pOut, "\n#endif\n\n");
+}
+
+
+/**
+ * Creates a MYSET with copies of all the data and inserts it into the
+ * g_SetList in an orderly fashion.
+ *
+ * @returns Fully complained exit code.
+ * @param pOsVerInfo The OS version info.
+ * @param enmArch The NT architecture of the incoming PDB.
+ * @param pszPdb The PDB file name.
+ */
+static RTEXITCODE saveStructures(PRTNTSDBOSVER pOsVerInfo, MYARCH enmArch, const char *pszPdb)
+{
+ /*
+ * Allocate one big chunk, figure it's size once.
+ */
+ static size_t s_cbNeeded = 0;
+ if (s_cbNeeded == 0)
+ {
+ s_cbNeeded = RT_UOFFSETOF(MYSET, aStructs[RT_ELEMENTS(g_aStructs)]);
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aStructs); i++)
+ s_cbNeeded += sizeof(MYMEMBER) * g_aStructs[i].cMembers;
+ }
+
+ size_t cbPdb = strlen(pszPdb) + 1;
+ PMYSET pSet = (PMYSET)RTMemAlloc(s_cbNeeded + cbPdb);
+ if (!pSet)
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "Out of memory!\n");
+
+ /*
+ * Copy over the data.
+ */
+ pSet->enmArch = enmArch;
+ memcpy(&pSet->OsVerInfo, pOsVerInfo, sizeof(pSet->OsVerInfo));
+ memcpy(&pSet->aStructs[0], g_aStructs, sizeof(g_aStructs));
+
+ PMYMEMBER pDst = (PMYMEMBER)&pSet->aStructs[RT_ELEMENTS(g_aStructs)];
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aStructs); i++)
+ {
+ pSet->aStructs[i].paMembers = pDst;
+ memcpy(pDst, g_aStructs[i].paMembers, g_aStructs[i].cMembers * sizeof(*pDst));
+ pDst += g_aStructs[i].cMembers;
+ }
+
+ pSet->pszPdb = (char *)pDst;
+ memcpy(pDst, pszPdb, cbPdb);
+
+ /*
+ * Link it.
+ */
+ PMYSET pInsertBefore;
+ RTListForEach(&g_SetList, pInsertBefore, MYSET, ListEntry)
+ {
+ int iDiff = rtNtOsVerInfoCompare(&pInsertBefore->OsVerInfo, &pSet->OsVerInfo);
+ if (iDiff >= 0)
+ {
+ if (iDiff > 0 || pInsertBefore->enmArch > pSet->enmArch)
+ {
+ RTListNodeInsertBefore(&pInsertBefore->ListEntry, &pSet->ListEntry);
+ return RTEXITCODE_SUCCESS;
+ }
+ }
+ }
+
+ RTListAppend(&g_SetList, &pSet->ListEntry);
+ return RTEXITCODE_SUCCESS;
+}
+
+
+/**
+ * Checks that we found everything.
+ *
+ * @returns Fully complained exit code.
+ */
+static RTEXITCODE checkThatWeFoundEverything(void)
+{
+ RTEXITCODE rcExit = RTEXITCODE_SUCCESS;
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aStructs); i++)
+ {
+ PMYMEMBER paMembers = g_aStructs[i].paMembers;
+ uint32_t j = g_aStructs[i].cMembers;
+ while (j-- > 0)
+ {
+ if (paMembers[j].off == UINT32_MAX)
+ rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, " Missing %s::%s\n", g_aStructs[i].pszName, paMembers[j].pszName);
+ }
+ }
+ return rcExit;
+}
+
+
+/**
+ * Matches the member against what we're looking for.
+ *
+ * @returns Number of hits.
+ * @param cWantedMembers The number members in paWantedMembers.
+ * @param paWantedMembers The members we're looking for.
+ * @param pszPrefix The member name prefix.
+ * @param pszMember The member name.
+ * @param offMember The member offset.
+ * @param cbMember The member size.
+ */
+static uint32_t matchUpStructMembers(unsigned cWantedMembers, PMYMEMBER paWantedMembers,
+ const char *pszPrefix, const char *pszMember,
+ uint32_t offMember, uint32_t cbMember)
+{
+ size_t cchPrefix = strlen(pszPrefix);
+ uint32_t cHits = 0;
+ uint32_t iMember = cWantedMembers;
+ while (iMember-- > 0)
+ {
+ if ( !strncmp(pszPrefix, paWantedMembers[iMember].pszName, cchPrefix)
+ && !strcmp(pszMember, paWantedMembers[iMember].pszName + cchPrefix))
+ {
+ paWantedMembers[iMember].off = offMember;
+ paWantedMembers[iMember].cb = cbMember;
+ cHits++;
+ }
+ else if (paWantedMembers[iMember].pszzAltNames)
+ {
+ char const *pszCur = paWantedMembers[iMember].pszzAltNames;
+ while (*pszCur)
+ {
+ size_t cchCur = strlen(pszCur);
+ if ( !strncmp(pszPrefix, pszCur, cchPrefix)
+ && !strcmp(pszMember, pszCur + cchPrefix))
+ {
+ paWantedMembers[iMember].off = offMember;
+ paWantedMembers[iMember].cb = cbMember;
+ cHits++;
+ break;
+ }
+ pszCur += cchCur + 1;
+ }
+ }
+ }
+ return cHits;
+}
+
+
+#if 0
+/**
+ * Resets the writable structure members prior to processing a PDB.
+ *
+ * While processing the PDB, will fill in the sizes and offsets of what we find.
+ * Afterwards we'll use look for reset values to see that every structure and
+ * member was located successfully.
+ */
+static void resetMyStructs(void)
+{
+ for (uint32_t i = 0; i < RT_ELEMENTS(g_aStructs); i++)
+ {
+ PMYMEMBER paMembers = g_aStructs[i].paMembers;
+ uint32_t j = g_aStructs[i].cMembers;
+ while (j-- > 0)
+ {
+ paMembers[j].off = UINT32_MAX;
+ paMembers[j].cb = UINT32_MAX;
+ }
+ }
+}
+#endif
+
+
+/**
+ * Find members in the specified structure type (@a idxType).
+ *
+ * @returns Fully bitched exit code.
+ * @param hFake Fake process handle.
+ * @param uModAddr The module address.
+ * @param idxType The type index of the structure which members we're
+ * going to process.
+ * @param cWantedMembers The number of wanted members.
+ * @param paWantedMembers The wanted members. This will be modified.
+ * @param offDisp Displacement when calculating member offsets.
+ * @param pszStructNm The top level structure name.
+ * @param pszPrefix The member name prefix.
+ * @param pszLogTag The log tag.
+ */
+static RTEXITCODE findMembers(HANDLE hFake, uint64_t uModAddr, uint32_t idxType,
+ uint32_t cWantedMembers, PMYMEMBER paWantedMembers,
+ uint32_t offDisp, const char *pszStructNm, const char *pszPrefix, const char *pszLogTag)
+{
+ RTEXITCODE rcExit = RTEXITCODE_SUCCESS;
+
+ DWORD cChildren = 0;
+ if (!SymGetTypeInfo(hFake, uModAddr, idxType, TI_GET_CHILDRENCOUNT, &cChildren))
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: TI_GET_CHILDRENCOUNT failed on _KPRCB: %u\n", pszLogTag, GetLastError());
+
+ MyDbgPrintf(" %s: cChildren=%u (%#x)\n", pszStructNm, cChildren);
+ TI_FINDCHILDREN_PARAMS *pChildren;
+ pChildren = (TI_FINDCHILDREN_PARAMS *)alloca(RT_UOFFSETOF_DYN(TI_FINDCHILDREN_PARAMS, ChildId[cChildren]));
+ pChildren->Start = 0;
+ pChildren->Count = cChildren;
+ if (!SymGetTypeInfo(hFake, uModAddr, idxType, TI_FINDCHILDREN, pChildren))
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: TI_FINDCHILDREN failed on _KPRCB: %u\n", pszLogTag, GetLastError());
+
+ for (uint32_t i = 0; i < cChildren; i++)
+ {
+ //MyDbgPrintf(" %s: child#%u: TypeIndex=%u\n", pszStructNm, i, pChildren->ChildId[i]);
+ IMAGEHLP_SYMBOL_TYPE_INFO enmErr;
+ PWCHAR pwszMember = NULL;
+ uint32_t idxRefType = 0;
+ uint32_t offMember = 0;
+ uint64_t cbMember = 0;
+ uint32_t cMemberChildren = 0;
+ if ( SymGetTypeInfo(hFake, uModAddr, pChildren->ChildId[i], enmErr = TI_GET_SYMNAME, &pwszMember)
+ && SymGetTypeInfo(hFake, uModAddr, pChildren->ChildId[i], enmErr = TI_GET_OFFSET, &offMember)
+ && SymGetTypeInfo(hFake, uModAddr, pChildren->ChildId[i], enmErr = TI_GET_TYPE, &idxRefType)
+ && SymGetTypeInfo(hFake, uModAddr, idxRefType, enmErr = TI_GET_LENGTH, &cbMember)
+ && SymGetTypeInfo(hFake, uModAddr, idxRefType, enmErr = TI_GET_CHILDRENCOUNT, &cMemberChildren)
+ )
+ {
+ offMember += offDisp;
+
+ char *pszMember;
+ int rc = RTUtf16ToUtf8(pwszMember, &pszMember);
+ if (RT_SUCCESS(rc))
+ {
+ matchUpStructMembers(cWantedMembers, paWantedMembers, pszPrefix, pszMember, offMember, cbMember);
+
+ /*
+ * Gather more info and do some debug printing. We'll use some
+ * of this info below when recursing into sub-structures
+ * and arrays.
+ */
+ uint32_t fNested = 0; SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_NESTED, &fNested);
+ uint32_t uDataKind = 0; SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_DATAKIND, &uDataKind);
+ uint32_t uBaseType = 0; SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_BASETYPE, &uBaseType);
+ uint32_t uMembTag = 0; SymGetTypeInfo(hFake, uModAddr, pChildren->ChildId[i], TI_GET_SYMTAG, &uMembTag);
+ uint32_t uBaseTag = 0; SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_SYMTAG, &uBaseTag);
+ uint32_t cElements = 0; SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_COUNT, &cElements);
+ uint32_t idxArrayType = 0; SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_ARRAYINDEXTYPEID, &idxArrayType);
+ MyDbgPrintf(" %#06x LB %#06llx %c%c %2d %2d %2d %2d %2d %4d %s::%s%s\n",
+ offMember, cbMember,
+ cMemberChildren > 0 ? 'c' : '-',
+ fNested != 0 ? 'n' : '-',
+ uDataKind,
+ uBaseType,
+ uMembTag,
+ uBaseTag,
+ cElements,
+ idxArrayType,
+ pszStructNm,
+ pszPrefix,
+ pszMember);
+
+ /*
+ * Recurse into children.
+ */
+ if (cMemberChildren > 0)
+ {
+ size_t cbNeeded = strlen(pszMember) + strlen(pszPrefix) + sizeof(".");
+ char *pszSubPrefix = (char *)RTMemTmpAlloc(cbNeeded);
+ if (pszSubPrefix)
+ {
+ strcat(strcat(strcpy(pszSubPrefix, pszPrefix), pszMember), ".");
+ RTEXITCODE rcExit2 = findMembers(hFake, uModAddr, idxRefType, cWantedMembers,
+ paWantedMembers, offMember,
+ pszStructNm,
+ pszSubPrefix,
+ pszLogTag);
+ if (rcExit2 != RTEXITCODE_SUCCESS)
+ rcExit = rcExit2;
+ RTMemTmpFree(pszSubPrefix);
+ }
+ else
+ rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "out of memory\n");
+ }
+ /*
+ * Recurse into arrays too.
+ */
+ else if (cElements > 0 && idxArrayType > 0)
+ {
+ BOOL fRc;
+ uint32_t idxElementRefType = 0;
+ fRc = SymGetTypeInfo(hFake, uModAddr, idxRefType, TI_GET_TYPE, &idxElementRefType); Assert(fRc);
+ uint64_t cbElement = cbMember / cElements;
+ fRc = SymGetTypeInfo(hFake, uModAddr, idxElementRefType, TI_GET_LENGTH, &cbElement); Assert(fRc);
+ MyDbgPrintf("idxArrayType=%u idxElementRefType=%u cbElement=%u\n", idxArrayType, idxElementRefType, cbElement);
+
+ size_t cbNeeded = strlen(pszMember) + strlen(pszPrefix) + sizeof("[xxxxxxxxxxxxxxxx].");
+ char *pszSubPrefix = (char *)RTMemTmpAlloc(cbNeeded);
+ if (pszSubPrefix)
+ {
+ for (uint32_t iElement = 0; iElement < cElements; iElement++)
+ {
+ RTStrPrintf(pszSubPrefix, cbNeeded, "%s%s[%u].", pszPrefix, pszMember, iElement);
+ RTEXITCODE rcExit2 = findMembers(hFake, uModAddr, idxElementRefType, cWantedMembers,
+ paWantedMembers,
+ offMember + iElement * cbElement,
+ pszStructNm,
+ pszSubPrefix,
+ pszLogTag);
+ if (rcExit2 != RTEXITCODE_SUCCESS)
+ rcExit = rcExit2;
+ }
+ RTMemTmpFree(pszSubPrefix);
+ }
+ else
+ rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "out of memory\n");
+ }
+
+ RTStrFree(pszMember);
+ }
+ else
+ rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: RTUtf16ToUtf8 failed on %s child#%u: %Rrc\n",
+ pszLogTag, pszStructNm, i, rc);
+ }
+ /* TI_GET_OFFSET fails on bitfields, so just ignore+skip those. */
+ else if (enmErr != TI_GET_OFFSET || GetLastError() != ERROR_INVALID_FUNCTION)
+ rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: SymGetTypeInfo(,,,%d,) failed on %s child#%u: %u\n",
+ pszLogTag, enmErr, pszStructNm, i, GetLastError());
+ LocalFree(pwszMember);
+ } /* For each child. */
+
+ return rcExit;
+}
+
+
+/**
+ * Lookup up structures and members in the given module.
+ *
+ * @returns Fully bitched exit code.
+ * @param hFake Fake process handle.
+ * @param uModAddr The module address.
+ * @param pszLogTag The log tag.
+ * @param pszPdb The full PDB path.
+ * @param pOsVerInfo The OS version info for altering the error handling
+ * for older OSes.
+ */
+static RTEXITCODE findStructures(HANDLE hFake, uint64_t uModAddr, const char *pszLogTag, const char *pszPdb,
+ PCRTNTSDBOSVER pOsVerInfo)
+{
+ RTEXITCODE rcExit = RTEXITCODE_SUCCESS;
+ PSYMBOL_INFO pSymInfo = (PSYMBOL_INFO)alloca(sizeof(*pSymInfo));
+ for (uint32_t iStruct = 0; iStruct < RT_ELEMENTS(g_aStructs); iStruct++)
+ {
+ pSymInfo->SizeOfStruct = sizeof(*pSymInfo);
+ pSymInfo->MaxNameLen = 0;
+ if (!SymGetTypeFromName(hFake, uModAddr, g_aStructs[iStruct].pszName, pSymInfo))
+ {
+ if (!(pOsVerInfo->uMajorVer == 5 && pOsVerInfo->uMinorVer == 0) /* w2k */)
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Failed to find _KPRCB: %u\n", pszPdb, GetLastError());
+ RTMsgInfo("%s: Skipping - failed to find _KPRCB: %u\n", pszPdb, GetLastError());
+ return RTEXITCODE_SKIPPED;
+ }
+
+ MyDbgPrintf(" %s: TypeIndex=%u\n", g_aStructs[iStruct].pszName, pSymInfo->TypeIndex);
+ MyDbgPrintf(" %s: Size=%u (%#x)\n", g_aStructs[iStruct].pszName, pSymInfo->Size, pSymInfo->Size);
+
+ rcExit = findMembers(hFake, uModAddr, pSymInfo->TypeIndex,
+ g_aStructs[iStruct].cMembers, g_aStructs[iStruct].paMembers, 0 /* offDisp */,
+ g_aStructs[iStruct].pszName, "", pszLogTag);
+ if (rcExit != RTEXITCODE_SUCCESS)
+ return rcExit;
+ } /* for each struct we want */
+ return rcExit;
+}
+
+
+#if 0 /* unused */
+static bool strIEndsWith(const char *pszString, const char *pszSuffix)
+{
+ size_t cchString = strlen(pszString);
+ size_t cchSuffix = strlen(pszSuffix);
+ if (cchString < cchSuffix)
+ return false;
+ return RTStrICmp(pszString + cchString - cchSuffix, pszSuffix) == 0;
+}
+#endif
+
+
+/**
+ * Use various hysterics to figure out the OS version details from the PDB path.
+ *
+ * This ASSUMES quite a bunch of things:
+ * -# Working on unpacked symbol packages. This does not work for
+ * windbg symbol stores/caches.
+ * -# The symbol package has been unpacked into a directory with the same
+ * name as the symbol package (sans suffixes).
+ *
+ * @returns Fully complained exit code.
+ * @param pszPdb The path to the PDB.
+ * @param pVerInfo Where to return the version info.
+ * @param penmArch Where to return the architecture.
+ */
+static RTEXITCODE FigurePdbVersionInfo(const char *pszPdb, PRTNTSDBOSVER pVerInfo, MYARCH *penmArch)
+{
+ /*
+ * Split the path.
+ */
+ union
+ {
+ RTPATHSPLIT Split;
+ uint8_t abPad[RTPATH_MAX + 1024];
+ } u;
+ int rc = RTPathSplit(pszPdb, &u.Split, sizeof(u), 0);
+ if (RT_FAILURE(rc))
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "RTPathSplit failed on '%s': %Rrc", pszPdb, rc);
+ if (!(u.Split.fProps & RTPATH_PROP_FILENAME))
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "RTPATH_PROP_FILENAME not set for: '%s'", pszPdb);
+ const char *pszFilename = u.Split.apszComps[u.Split.cComps - 1];
+
+ /*
+ * SMP or UNI kernel?
+ */
+ if ( !RTStrICmp(pszFilename, "ntkrnlmp.pdb")
+ || !RTStrICmp(pszFilename, "ntkrpamp.pdb")
+ )
+ pVerInfo->fSmp = true;
+ else if ( !RTStrICmp(pszFilename, "ntoskrnl.pdb")
+ || !RTStrICmp(pszFilename, "ntkrnlpa.pdb")
+ )
+ pVerInfo->fSmp = false;
+ else
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "Doesn't recognize the filename '%s'...", pszFilename);
+
+ /*
+ * Look for symbol pack names in the path. This is stuff like:
+ * - WindowsVista.6002.090410-1830.x86fre
+ * - WindowsVista.6002.090410-1830.amd64chk
+ * - Windows_Win7.7600.16385.090713-1255.X64CHK
+ * - Windows_Win7SP1.7601.17514.101119-1850.AMD64FRE
+ * - Windows_Win8.9200.16384.120725-1247.X86CHK
+ * - en_windows_8_1_symbols_debug_checked_x64_2712568
+ */
+ uint32_t i = u.Split.cComps - 1;
+ while (i-- > 0)
+ {
+ static struct
+ {
+ const char *pszPrefix;
+ size_t cchPrefix;
+ uint8_t uMajorVer;
+ uint8_t uMinorVer;
+ uint8_t uCsdNo;
+ uint32_t uBuildNo; /**< UINT32_MAX means the number immediately after the prefix. */
+ } const s_aSymPacks[] =
+ {
+ { RT_STR_TUPLE("w2kSP1SYM"), 5, 0, 1, 2195 },
+ { RT_STR_TUPLE("w2ksp2srp1"), 5, 0, 2, 2195 },
+ { RT_STR_TUPLE("w2ksp2sym"), 5, 0, 2, 2195 },
+ { RT_STR_TUPLE("w2ksp3sym"), 5, 0, 3, 2195 },
+ { RT_STR_TUPLE("w2ksp4sym"), 5, 0, 4, 2195 },
+ { RT_STR_TUPLE("Windows2000-KB891861"), 5, 0, 4, 2195 },
+ { RT_STR_TUPLE("windowsxp"), 5, 1, 0, 2600 },
+ { RT_STR_TUPLE("xpsp1sym"), 5, 1, 1, 2600 },
+ { RT_STR_TUPLE("WindowsXP-KB835935-SP2-"), 5, 1, 2, 2600 },
+ { RT_STR_TUPLE("WindowsXP-KB936929-SP3-"), 5, 1, 3, 2600 },
+ { RT_STR_TUPLE("Windows2003."), 5, 2, 0, 3790 },
+ { RT_STR_TUPLE("Windows2003_sp1."), 5, 2, 1, 3790 },
+ { RT_STR_TUPLE("WindowsServer2003-KB933548-v1"), 5, 2, 1, 3790 },
+ { RT_STR_TUPLE("WindowsVista.6000."), 6, 0, 0, 6000 },
+ { RT_STR_TUPLE("Windows_Longhorn.6001."), 6, 0, 1, 6001 }, /* incl w2k8 */
+ { RT_STR_TUPLE("WindowsVista.6002."), 6, 0, 2, 6002 }, /* incl w2k8 */
+ { RT_STR_TUPLE("Windows_Winmain.7000"), 6, 1, 0, 7000 }, /* Beta */
+ { RT_STR_TUPLE("Windows_Winmain.7100"), 6, 1, 0, 7100 }, /* RC */
+ { RT_STR_TUPLE("Windows_Win7.7600"), 6, 1, 0, 7600 }, /* RC */
+ { RT_STR_TUPLE("Windows_Win7SP1.7601"), 6, 1, 1, 7601 }, /* RC */
+ { RT_STR_TUPLE("Windows_Winmain.8102"), 6, 2, 0, 8102 }, /* preview */
+ { RT_STR_TUPLE("Windows_Winmain.8250"), 6, 2, 0, 8250 }, /* beta */
+ { RT_STR_TUPLE("Windows_Winmain.8400"), 6, 2, 0, 8400 }, /* RC */
+ { RT_STR_TUPLE("Windows_Win8.9200"), 6, 2, 0, 9200 }, /* RTM */
+ { RT_STR_TUPLE("en_windows_8_1"), 6, 3, 0, 9600 }, /* RTM */
+ { RT_STR_TUPLE("en_windows_10_symbols_"), 10, 0, 0,10240 }, /* RTM */
+ { RT_STR_TUPLE("en_windows_10_symbols_"), 10, 0, 0,10240 }, /* RTM */
+ { RT_STR_TUPLE("en_windows_10_17134_"), 10, 0, 0,17134 }, /* 1803 */
+ };
+
+ const char *pszComp = u.Split.apszComps[i];
+ uint32_t iSymPack = RT_ELEMENTS(s_aSymPacks);
+ while (iSymPack-- > 0)
+ if (!RTStrNICmp(pszComp, s_aSymPacks[iSymPack].pszPrefix, s_aSymPacks[iSymPack].cchPrefix))
+ break;
+ if (iSymPack >= RT_ELEMENTS(s_aSymPacks))
+ continue;
+
+ pVerInfo->uMajorVer = s_aSymPacks[iSymPack].uMajorVer;
+ pVerInfo->uMinorVer = s_aSymPacks[iSymPack].uMinorVer;
+ pVerInfo->uCsdNo = s_aSymPacks[iSymPack].uCsdNo;
+ pVerInfo->fChecked = false;
+ pVerInfo->uBuildNo = s_aSymPacks[iSymPack].uBuildNo;
+
+ /* Parse build number if necessary. */
+ if (s_aSymPacks[iSymPack].uBuildNo == UINT32_MAX)
+ {
+ char *pszNext;
+ rc = RTStrToUInt32Ex(pszComp + s_aSymPacks[iSymPack].cchPrefix, &pszNext, 10, &pVerInfo->uBuildNo);
+ if (RT_FAILURE(rc))
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "Failed to decode build number in '%s': %Rrc", pszComp, rc);
+ if (*pszNext != '.' && *pszNext != '_' && *pszNext != '-')
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "Failed to decode build number in '%s': '%c'", pszComp, *pszNext);
+ }
+
+ /* Look for build arch and checked/free. */
+ if ( RTStrIStr(pszComp, ".x86.chk.")
+ || RTStrIStr(pszComp, ".x86chk.")
+ || RTStrIStr(pszComp, "_x86_chk_")
+ || RTStrIStr(pszComp, "_x86chk_")
+ || RTStrIStr(pszComp, "-x86-DEBUG")
+ || (RTStrIStr(pszComp, "-x86-") && RTStrIStr(pszComp, "-DEBUG"))
+ || RTStrIStr(pszComp, "_debug_checked_x86")
+ )
+ {
+ pVerInfo->fChecked = true;
+ *penmArch = MYARCH_X86;
+ }
+ else if ( RTStrIStr(pszComp, ".amd64.chk.")
+ || RTStrIStr(pszComp, ".amd64chk.")
+ || RTStrIStr(pszComp, ".x64.chk.")
+ || RTStrIStr(pszComp, ".x64chk.")
+ || RTStrIStr(pszComp, "_debug_checked_x64")
+ )
+ {
+ pVerInfo->fChecked = true;
+ *penmArch = MYARCH_AMD64;
+ }
+ else if ( RTStrIStr(pszComp, ".amd64.fre.")
+ || RTStrIStr(pszComp, ".amd64fre.")
+ || RTStrIStr(pszComp, ".x64.fre.")
+ || RTStrIStr(pszComp, ".x64fre.")
+ )
+ {
+ pVerInfo->fChecked = false;
+ *penmArch = MYARCH_AMD64;
+ }
+ else if ( RTStrIStr(pszComp, "DEBUG")
+ || RTStrIStr(pszComp, "_chk")
+ )
+ {
+ pVerInfo->fChecked = true;
+ *penmArch = MYARCH_X86;
+ }
+ else if (RTStrIStr(pszComp, "_x64"))
+ {
+ pVerInfo->fChecked = false;
+ *penmArch = MYARCH_AMD64;
+ }
+ else
+ {
+ pVerInfo->fChecked = false;
+ *penmArch = MYARCH_X86;
+ }
+ return RTEXITCODE_SUCCESS;
+ }
+
+ return RTMsgErrorExit(RTEXITCODE_FAILURE, "Giving up on '%s'...\n", pszPdb);
+}
+
+
+/**
+ * Process one PDB.
+ *
+ * Queries the file size, figures the matching Windows version/arch, loads the
+ * PDB via dbghelp under a fake handle, extracts the structures and queues the
+ * result for header generation.
+ *
+ * @returns Fully bitched exit code.
+ * @param   pszPdb              The path to the PDB.
+ */
+static RTEXITCODE processPdb(const char *pszPdb)
+{
+    /*
+     * We need the size later on, so get that now and present proper IPRT error
+     * info if the file is missing or inaccessible.
+     */
+    RTFSOBJINFO ObjInfo;
+    int rc = RTPathQueryInfoEx(pszPdb, &ObjInfo, RTFSOBJATTRADD_NOTHING, RTPATH_F_FOLLOW_LINK);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "RTPathQueryInfo fail on '%s': %Rrc\n", pszPdb, rc);
+
+    /*
+     * Figure the windows version details for the given PDB.
+     */
+    MYARCH enmArch;
+    RTNTSDBOSVER OsVerInfo;
+    RTEXITCODE rcExit = FigurePdbVersionInfo(pszPdb, &OsVerInfo, &enmArch);
+    if (rcExit != RTEXITCODE_SUCCESS)
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "Failed to figure the OS version info for '%s'.\n", pszPdb); /* fixed stray quote in msg */
+
+    /*
+     * Create a fake handle and open the PDB.  dbghelp only needs the handle
+     * to be unique per session, so a simple counter suffices.
+     */
+    static uintptr_t s_iHandle = 0;
+    HANDLE hFake = (HANDLE)++s_iHandle;
+    if (!SymInitialize(hFake, NULL, FALSE))
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "SymInitialize failed: %u\n", GetLastError()); /* fixed 'SymInitialied' typo */
+
+    uint64_t uModAddr = UINT64_C(0x1000000);
+    uModAddr = SymLoadModuleEx(hFake, NULL /*hFile*/, pszPdb, NULL /*pszModuleName*/,
+                               uModAddr, ObjInfo.cbObject, NULL /*pData*/, 0 /*fFlags*/);
+    if (uModAddr != 0)
+    {
+        MyDbgPrintf("*** uModAddr=%#llx \"%s\" ***\n", uModAddr, pszPdb);
+
+        char szLogTag[32];
+        RTStrCopy(szLogTag, sizeof(szLogTag), RTPathFilename(pszPdb));
+
+        /*
+         * Find the structures.
+         */
+        rcExit = findStructures(hFake, uModAddr, szLogTag, pszPdb, &OsVerInfo);
+        if (rcExit == RTEXITCODE_SUCCESS)
+            rcExit = checkThatWeFoundEverything();
+        if (rcExit == RTEXITCODE_SUCCESS)
+        {
+            /*
+             * Save the details for later when we produce the header.
+             */
+            rcExit = saveStructures(&OsVerInfo, enmArch, pszPdb);
+        }
+    }
+    else
+        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "SymLoadModuleEx failed: %u\n", GetLastError());
+
+    /* Always clean up the dbghelp session, even on failure paths above. */
+    if (!SymCleanup(hFake))
+        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "SymCleanup failed: %u\n", GetLastError());
+
+    /* Skipped PDBs (unsupported versions) are not fatal. */
+    if (rcExit == RTEXITCODE_SKIPPED)
+        rcExit = RTEXITCODE_SUCCESS;
+    return rcExit;
+}
+
+
+/** The size of the directory entry buffer we're using. */
+#define MY_DIRENTRY_BUF_SIZE (sizeof(RTDIRENTRYEX) + RTPATH_MAX)
+
+/**
+ * Checks if the name is of interest to us.
+ *
+ * Matches (case-insensitively) against the known NT kernel PDB names.
+ *
+ * @returns true/false.
+ * @param   pszName             The name.
+ * @param   cchName             The length of the name.
+ */
+static bool isInterestingName(const char *pszName, size_t cchName)
+{
+    static struct { const char *psz; size_t cch; } const s_aNames[] =
+    {
+        RT_STR_TUPLE("ntoskrnl.pdb"),
+        RT_STR_TUPLE("ntkrnlmp.pdb"),
+        RT_STR_TUPLE("ntkrnlpa.pdb"),
+        RT_STR_TUPLE("ntkrpamp.pdb"),
+    };
+
+    /* Cheap pre-filter: all candidates share one length and an "nt" prefix. */
+    if (   cchName == s_aNames[0].cch
+        && (pszName[0] == 'n' || pszName[0] == 'N')
+        && (pszName[1] == 't' || pszName[1] == 'T')
+       )
+    {
+        for (unsigned iName = 0; iName < RT_ELEMENTS(s_aNames); iName++)
+            if (   s_aNames[iName].cch == cchName
+                && !RTStrICmp(s_aNames[iName].psz, pszName))
+                return true;
+    }
+    return false;
+}
+
+
+/**
+ * Recursively processes relevant files in the specified directory.
+ *
+ * Note: pszDir is a single RTPATH_MAX scratch buffer shared by the whole
+ * recursion; entry names are appended in place at pszDir[cchDir].
+ *
+ * @returns Fully complained exit code.
+ * @param   pszDir              Pointer to the directory buffer.
+ * @param   cchDir              The length of pszDir in pszDir.
+ * @param   pDirEntry           Pointer to the directory buffer.
+ * @param   iLogDepth           The logging depth.
+ */
+static RTEXITCODE processDirSub(char *pszDir, size_t cchDir, PRTDIRENTRYEX pDirEntry, int iLogDepth)
+{
+    Assert(cchDir > 0); Assert(pszDir[cchDir] == '\0');
+
+    /* Make sure we've got some room in the path, to save us extra work further down. */
+    if (cchDir + 3 >= RTPATH_MAX)
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "Path too long: '%s'\n", pszDir);
+
+    /* Open directory. */
+    RTDIR hDir;
+    int rc = RTDirOpen(&hDir, pszDir);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "RTDirOpen failed on '%s': %Rrc\n", pszDir, rc);
+
+    /* Ensure we've got a trailing slash (there is space for it see above). */
+    if (!RTPATH_IS_SEP(pszDir[cchDir - 1]))
+    {
+        pszDir[cchDir++] = RTPATH_SLASH;
+        pszDir[cchDir]   = '\0';
+    }
+
+    /*
+     * Process the files and subdirs.
+     */
+    RTEXITCODE rcExit = RTEXITCODE_SUCCESS;
+    for (;;)
+    {
+        /* Get the next directory. */
+        size_t cbDirEntry = MY_DIRENTRY_BUF_SIZE;
+        rc = RTDirReadEx(hDir, pDirEntry, &cbDirEntry, RTFSOBJATTRADD_UNIX, RTPATH_F_ON_LINK);
+        if (RT_FAILURE(rc))
+            break;
+
+        /* Skip the dot and dot-dot links. */
+        if (RTDirEntryExIsStdDotLink(pDirEntry))
+            continue;
+
+        /* Check length. */
+        if (pDirEntry->cbName + cchDir + 3 >= RTPATH_MAX)
+        {
+            rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "Path too long: '%s' in '%.*s'\n", pDirEntry->szName, cchDir, pszDir);
+            break;
+        }
+
+        if (RTFS_IS_FILE(pDirEntry->Info.Attr.fMode))
+        {
+            /*
+             * Process debug info files of interest.
+             */
+            if (isInterestingName(pDirEntry->szName, pDirEntry->cbName))
+            {
+                /* Append the file name (incl. terminator) to the scratch path. */
+                memcpy(&pszDir[cchDir], pDirEntry->szName, pDirEntry->cbName + 1);
+                RTEXITCODE rcExit2 = processPdb(pszDir);
+                if (rcExit2 != RTEXITCODE_SUCCESS)
+                    rcExit = rcExit2;
+            }
+        }
+        else if (RTFS_IS_DIRECTORY(pDirEntry->Info.Attr.fMode))
+        {
+            /*
+             * Recurse into the subdirectory.  In order to speed up Win7+
+             * symbol pack traversals, we skip directories with ".pdb" suffixes
+             * unless they match any of the .pdb files we're looking for.
+             *
+             * Note! When we get back pDirEntry will be invalid.
+             */
+            if (   pDirEntry->cbName <= 4
+                || RTStrICmp(&pDirEntry->szName[pDirEntry->cbName - 4], ".pdb")
+                || isInterestingName(pDirEntry->szName, pDirEntry->cbName))
+            {
+                memcpy(&pszDir[cchDir], pDirEntry->szName, pDirEntry->cbName + 1);
+                if (iLogDepth > 0)
+                    RTMsgInfo("%s%s ...\n", pszDir, RTPATH_SLASH_STR);
+                RTEXITCODE rcExit2 = processDirSub(pszDir, cchDir + pDirEntry->cbName, pDirEntry, iLogDepth - 1);
+                if (rcExit2 != RTEXITCODE_SUCCESS)
+                    rcExit = rcExit2;
+            }
+        }
+    }
+    /* VERR_NO_MORE_FILES is the normal loop-exit condition; anything else is an error. */
+    if (rc != VERR_NO_MORE_FILES)
+        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "RTDirReadEx failed: %Rrc\npszDir=%.*s", rc, cchDir, pszDir);
+
+    rc = RTDirClose(hDir);
+    if (RT_FAILURE(rc))
+        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "RTDirClose failed: %Rrc\npszDir=%.*s", rc, cchDir, pszDir);
+    return rcExit;
+}
+
+
+/**
+ * Recursively processes relevant files in the specified directory.
+ *
+ * @returns Fully complained exit code.
+ * @param   pszDir              The directory to search.
+ */
+static RTEXITCODE processDir(const char *pszDir)
+{
+    /* Make the path absolute first so the recursion works on a stable, writable buffer. */
+    char szAbsPath[RTPATH_MAX];
+    int rc = RTPathAbs(pszDir, szAbsPath, sizeof(szAbsPath));
+    if (RT_FAILURE(rc))
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "RTPathAbs failed on '%s': %Rrc\n", pszDir, rc);
+
+    /* One directory-entry scratch buffer is shared by the entire traversal. */
+    union
+    {
+        uint8_t      abPadding[MY_DIRENTRY_BUF_SIZE];
+        RTDIRENTRYEX DirEntry;
+    } uEntryBuf;
+    return processDirSub(szAbsPath, strlen(szAbsPath), &uEntryBuf.DirEntry, g_iOptVerbose);
+}
+
+
+int main(int argc, char **argv)
+{
+    int rc = RTR3InitExe(argc, &argv, 0 /*fFlags*/);
+    if (RT_FAILURE(rc))
+        return RTMsgInitFailure(rc);
+
+    RTListInit(&g_SetList);
+
+    /*
+     * Parse options.
+     */
+    static const RTGETOPTDEF s_aOptions[] =
+    {
+        { "--force", 'f', RTGETOPT_REQ_NOTHING },
+        { "--output", 'o', RTGETOPT_REQ_STRING },
+        { "--verbose", 'v', RTGETOPT_REQ_NOTHING },
+        { "--quiet", 'q', RTGETOPT_REQ_NOTHING },
+    };
+
+    RTEXITCODE rcExit = RTEXITCODE_SUCCESS;
+    const char *pszOutput = "-";
+
+    int ch;
+    RTGETOPTUNION ValueUnion;
+    RTGETOPTSTATE GetState;
+    RTGetOptInit(&GetState, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1,
+                 RTGETOPTINIT_FLAGS_OPTS_FIRST);
+    while ((ch = RTGetOpt(&GetState, &ValueUnion)) != 0)
+    {
+        switch (ch)
+        {
+            case 'f':
+                g_fOptForce = true;
+                break;
+
+            case 'v':
+                g_iOptVerbose++;
+                break;
+
+            case 'q':
+                g_iOptVerbose--; /* fix: --quiet previously *increased* verbosity (copy-paste of 'v') */
+                break;
+
+            case 'o':
+                pszOutput = ValueUnion.psz;
+                break;
+
+            case 'V':
+                RTPrintf("$Revision: 127862 $\n");
+                return RTEXITCODE_SUCCESS; /* fix: --version fell through and kept processing */
+
+            case 'h':
+                RTPrintf("usage: %s [-v|--verbose] [-q|--quiet] [-f|--force] [-o|--output <file.h>] <dir1|pdb1> [...]\n"
+                         " or: %s [-V|--version]\n"
+                         " or: %s [-h|--help]\n",
+                         argv[0], argv[0], argv[0]);
+                return RTEXITCODE_SUCCESS;
+
+            case VINF_GETOPT_NOT_OPTION:
+            {
+                /* Non-option arguments are either PDB files or directories to scan. */
+                RTEXITCODE rcExit2;
+                if (RTFileExists(ValueUnion.psz))
+                    rcExit2 = processPdb(ValueUnion.psz);
+                else
+                    rcExit2 = processDir(ValueUnion.psz);
+                if (rcExit2 != RTEXITCODE_SUCCESS)
+                {
+                    /* Without --force the first failure aborts; with it we continue but remember it. */
+                    if (!g_fOptForce)
+                        return rcExit2;
+                    rcExit = rcExit2;
+                }
+                break;
+            }
+
+            default:
+                return RTGetOptPrintError(ch, &ValueUnion);
+        }
+    }
+    if (RTListIsEmpty(&g_SetList))
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "No usable debug files found.\n");
+
+    /*
+     * Generate the output.  "-" (the default) means stdout.
+     */
+    PRTSTREAM pOut = g_pStdOut;
+    if (strcmp(pszOutput, "-"))
+    {
+        rc = RTStrmOpen(pszOutput, "w", &pOut);
+        if (RT_FAILURE(rc))
+            return RTMsgErrorExit(RTEXITCODE_FAILURE, "Error opening '%s' for writing: %Rrc\n", pszOutput, rc);
+    }
+
+    generateHeader(pOut);
+
+    if (pOut != g_pStdOut)
+        rc = RTStrmClose(pOut);
+    else
+        rc = RTStrmFlush(pOut);
+    if (RT_FAILURE(rc))
+        return RTMsgErrorExit(RTEXITCODE_FAILURE, "Error %s '%s': %Rrc\n",
+                              pOut != g_pStdOut ? "closing" : "flushing", pszOutput, rc); /* fix: verb/name args were swapped */
+    return rcExit;
+}
diff --git a/src/VBox/Runtime/r0drv/nt/process-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/process-r0drv-nt.cpp
new file mode 100644
index 00000000..81662122
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/process-r0drv-nt.cpp
@@ -0,0 +1,45 @@
+/* $Id: process-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Process, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+#include <iprt/process.h>
+
+
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+    /* NT reports the process id as a HANDLE; narrow it via uintptr_t to RTPROCESS. */
+    return (RTPROCESS)(uintptr_t)PsGetCurrentProcessId();
+}
+
+
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+    /* The current EPROCESS pointer doubles as the IPRT ring-0 process handle. */
+    return (RTR0PROCESS)PsGetCurrentProcess();
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/semevent-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/semevent-r0drv-nt.cpp
new file mode 100644
index 00000000..0a04b7cc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/semevent-r0drv-nt.cpp
@@ -0,0 +1,277 @@
+/* $Id: semevent-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-nt-kernel.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/time.h>
+#include <iprt/timer.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * NT event semaphore.
+ *
+ * Wraps an NT KEVENT (initialized as auto-reset / SynchronizationEvent by
+ * RTSemEventCreateEx) together with a reference count that keeps the memory
+ * alive while threads are inside the NT wait/signal APIs.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+    /** Magic value (RTSEMEVENT_MAGIC); incremented on destruction so racing
+     *  waiters can detect that the semaphore went away. */
+    uint32_t volatile u32Magic;
+    /** Reference counter; the structure is freed when it reaches zero. */
+    uint32_t volatile cRefs;
+    /** The NT Event object. */
+    KEVENT Event;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+    /* Convenience wrapper: no flags, no lock-validator class or name. */
+    return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+    /* Only the no-lock-validation and bootstrap-hack flags are recognized. */
+    AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+    Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+    AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+    RT_NOREF2(hClass, pszNameFmt);
+
+    /* Allocate and initialize the internal semaphore structure. */
+    PRTSEMEVENTINTERNAL pIntEvt = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pIntEvt));
+    if (!pIntEvt)
+        return VERR_NO_MEMORY;
+
+    pIntEvt->u32Magic = RTSEMEVENT_MAGIC;
+    pIntEvt->cRefs    = 1;
+    /* SynchronizationEvent = auto-reset: each signal releases a single waiter. */
+    KeInitializeEvent(&pIntEvt->Event, SynchronizationEvent, FALSE /* not signalled */);
+
+    *phEventSem = pIntEvt;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Retains a reference to the semaphore.
+ *
+ * @param   pThis               The semaphore to retain.
+ */
+DECLINLINE(void) rtR0SemEventNtRetain(PRTSEMEVENTINTERNAL pThis)
+{
+    uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+    /* Sanity: a six-digit count would indicate a reference leak. */
+    Assert(cRefs < 100000); NOREF(cRefs);
+}
+
+
+/**
+ * Releases a reference to the semaphore.
+ *
+ * @param   pThis               The semaphore to release
+ */
+DECLINLINE(void) rtR0SemEventNtRelease(PRTSEMEVENTINTERNAL pThis)
+{
+    /* Dropping the last reference frees the structure. */
+    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
+        RTMemFree(pThis);
+}
+
+
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTINTERNAL pThis = hEventSem;
+    if (pThis == NIL_RTSEMEVENT)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+    /*
+     * Invalidate it and signal the object just in case.
+     */
+    ASMAtomicIncU32(&pThis->u32Magic);          /* waiters now see a stale magic -> VERR_SEM_DESTROYED */
+    KeSetEvent(&pThis->Event, 0xfff, FALSE);    /* 0xfff = large priority increment to wake waiters promptly */
+    rtR0SemEventNtRelease(pThis);               /* drop the creation reference; frees when waiters are gone */
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+    /*
+     * Validate the handle before touching it.
+     */
+    PRTSEMEVENTINTERNAL pIntEvt = (PRTSEMEVENTINTERNAL)hEventSem;
+    AssertPtrReturn(pIntEvt, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pIntEvt->u32Magic == RTSEMEVENT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pIntEvt->u32Magic, pIntEvt), VERR_INVALID_HANDLE);
+
+    /*
+     * Hold a reference across the NT call so a concurrent destroy cannot
+     * free the memory under us, then set the event.
+     */
+    rtR0SemEventNtRetain(pIntEvt);
+    KeSetEvent(&pIntEvt->Event, 1, FALSE);
+    rtR0SemEventNtRelease(pIntEvt);
+    return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param   pThis               The event semaphore.
+ * @param   fFlags              See RTSemEventWaitEx.
+ * @param   uTimeout            See RTSemEventWaitEx.
+ * @param   pSrcPos             The source code position of the wait.
+ */
+DECLINLINE(int) rtR0SemEventNtWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+                                   PCRTLOCKVALSRCPOS pSrcPos)
+{
+    /*
+     * Validate input.
+     */
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+    NOREF(pSrcPos);
+
+    /* Keep the structure alive while we're inside KeWaitForSingleObject. */
+    rtR0SemEventNtRetain(pThis);
+
+    /*
+     * Convert the timeout to a relative one because KeWaitForSingleObject
+     * takes system time instead of interrupt time as input for absolute
+     * timeout specifications. So, we're best of by giving it relative time.
+     *
+     * Lazy bird converts uTimeout to relative nanoseconds and then to Nt time.
+     */
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+            /* ms -> ns, clamping to UINT64_MAX on multiplication overflow. */
+            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
+                     ? uTimeout * UINT32_C(1000000)
+                     : UINT64_MAX;
+        if (uTimeout == UINT64_MAX)
+            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+        else
+        {
+            if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
+            {
+                /* Absolute deadline -> remaining time (0 if already past). */
+                uint64_t u64Now = RTTimeSystemNanoTS();
+                uTimeout = u64Now < uTimeout
+                         ? uTimeout - u64Now
+                         : 0;
+            }
+        }
+    }
+
+    /*
+     * Wait for it.
+     * We're assuming interruptible waits should happen at UserMode level.
+     */
+    NTSTATUS rcNt;
+    BOOLEAN fInterruptible = !!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
+    KPROCESSOR_MODE WaitMode = fInterruptible ? UserMode : KernelMode;
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+        rcNt = KeWaitForSingleObject(&pThis->Event, Executive, WaitMode, fInterruptible, NULL);
+    else
+    {
+        LARGE_INTEGER Timeout;
+        /* Negative QuadPart = relative timeout; NT counts in 100ns units. */
+        Timeout.QuadPart = -(int64_t)(uTimeout / 100);
+        rcNt = KeWaitForSingleObject(&pThis->Event, Executive, WaitMode, fInterruptible, &Timeout);
+    }
+    int rc;
+    /* Re-check the magic: RTSemEventDestroy may have run while we waited. */
+    if (pThis->u32Magic == RTSEMEVENT_MAGIC)
+    {
+        switch (rcNt)
+        {
+            case STATUS_SUCCESS:
+                rc = VINF_SUCCESS;
+                break;
+            case STATUS_ALERTED:
+                rc = VERR_INTERRUPTED;
+                break;
+            case STATUS_USER_APC:
+                rc = VERR_INTERRUPTED;
+                break;
+            case STATUS_TIMEOUT:
+                rc = VERR_TIMEOUT;
+                break;
+            default:
+                AssertMsgFailed(("pThis->u32Magic=%RX32 pThis=%p: wait returned %lx!\n",
+                                 pThis->u32Magic, pThis, (long)rcNt));
+                rc = VERR_INTERNAL_ERROR_4;
+                break;
+        }
+    }
+    else
+        rc = VERR_SEM_DESTROYED;
+
+    rtR0SemEventNtRelease(pThis);
+    return rc;
+}
+
+
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+    return rtR0SemEventNtWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+    /* Strict builds capture the caller's source position (currently unused by the worker). */
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+    return rtR0SemEventNtWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+
+
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+                                  RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    /* Debug API variant: pass the caller-supplied source position along. */
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+    return rtR0SemEventNtWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+    /* Timeout resolution equals the system timer granularity. */
+    return RTTimerGetSystemGranularity();
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/semeventmulti-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/semeventmulti-r0drv-nt.cpp
new file mode 100644
index 00000000..0889a801
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/semeventmulti-r0drv-nt.cpp
@@ -0,0 +1,300 @@
+/* $Id: semeventmulti-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-nt-kernel.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/time.h>
+#include <iprt/timer.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * NT event semaphore.
+ *
+ * Multiple-release variant: wraps a manual-reset NT KEVENT (NotificationEvent,
+ * see RTSemEventMultiCreateEx) plus a reference count guarding the memory.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+    /** Magic value (RTSEMEVENTMULTI_MAGIC); incremented on destruction so
+     *  racing waiters can detect the semaphore went away. */
+    uint32_t volatile u32Magic;
+    /** Reference counter; the structure is freed when it reaches zero. */
+    uint32_t volatile cRefs;
+    /** The NT Event object. */
+    KEVENT Event;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+    /* Convenience wrapper: no flags, no lock-validator class or name. */
+    return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+                                    const char *pszNameFmt, ...)
+{
+    /* Only the no-lock-validation flag is recognized. */
+    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+    RT_NOREF2(hClass, pszNameFmt);
+
+    /* Allocate and initialize the internal semaphore structure. */
+    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
+    PRTSEMEVENTMULTIINTERNAL pIntEvtMulti = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pIntEvtMulti));
+    if (!pIntEvtMulti)
+        return VERR_NO_MEMORY;
+
+    pIntEvtMulti->u32Magic = RTSEMEVENTMULTI_MAGIC;
+    pIntEvtMulti->cRefs    = 1;
+    /* NotificationEvent = manual reset: stays signalled until explicitly reset. */
+    KeInitializeEvent(&pIntEvtMulti->Event, NotificationEvent, FALSE /* not signalled */);
+
+    *phEventMultiSem = pIntEvtMulti;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Retains a reference to the semaphore.
+ *
+ * @param   pThis               The semaphore to retain.
+ */
+DECLINLINE(void) rtR0SemEventMultiNtRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+    /* Sanity: a six-digit count would indicate a reference leak. */
+    Assert(cRefs < 100000); NOREF(cRefs);
+}
+
+
+/**
+ * Releases a reference to the semaphore.
+ *
+ * @param   pThis               The semaphore to release
+ */
+DECLINLINE(void) rtR0SemEventMultiNtRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    /* Dropping the last reference frees the structure. */
+    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
+        RTMemFree(pThis);
+}
+
+
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+    /*
+     * Validate input.
+     */
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (pThis == NIL_RTSEMEVENTMULTI)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+
+    /*
+     * Invalidate it and signal the object just in case.
+     */
+    ASMAtomicIncU32(&pThis->u32Magic);          /* waiters now see a stale magic -> VERR_SEM_DESTROYED */
+    KeSetEvent(&pThis->Event, 0xfff, FALSE);    /* 0xfff = large priority increment to wake waiters promptly */
+    rtR0SemEventMultiNtRelease(pThis);          /* drop the creation reference; frees when waiters are gone */
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+    /*
+     * Validate the handle before touching it.
+     */
+    PRTSEMEVENTMULTIINTERNAL pIntEvtMulti = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (!pIntEvtMulti)
+        return VERR_INVALID_PARAMETER;
+    AssertPtrReturn(pIntEvtMulti, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pIntEvtMulti->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pIntEvtMulti, pIntEvtMulti->u32Magic), VERR_INVALID_PARAMETER);
+
+    /*
+     * Hold a reference across the NT call so a concurrent destroy cannot
+     * free the memory under us, then signal the event.
+     */
+    rtR0SemEventMultiNtRetain(pIntEvtMulti);
+    KeSetEvent(&pIntEvtMulti->Event, 1, FALSE);
+    rtR0SemEventMultiNtRelease(pIntEvtMulti);
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+    /*
+     * Validate the handle before touching it.
+     */
+    PRTSEMEVENTMULTIINTERNAL pIntEvtMulti = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (!pIntEvtMulti)
+        return VERR_INVALID_PARAMETER;
+    AssertPtrReturn(pIntEvtMulti, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pIntEvtMulti->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pIntEvtMulti, pIntEvtMulti->u32Magic), VERR_INVALID_PARAMETER);
+
+    /*
+     * Hold a reference across the NT call so a concurrent destroy cannot
+     * free the memory under us, then clear the (manual-reset) event.
+     */
+    rtR0SemEventMultiNtRetain(pIntEvtMulti);
+    KeResetEvent(&pIntEvtMulti->Event);
+    rtR0SemEventMultiNtRelease(pIntEvtMulti);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param   pThis               The event semaphore.
+ * @param   fFlags              See RTSemEventMultiWaitEx.
+ * @param   uTimeout            See RTSemEventMultiWaitEx.
+ * @param   pSrcPos             The source code position of the wait.
+ */
+DECLINLINE(int) rtR0SemEventMultiNtWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+                                        PCRTLOCKVALSRCPOS pSrcPos)
+{
+    /*
+     * Validate input.
+     */
+    if (!pThis)
+        return VERR_INVALID_PARAMETER;
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+    RT_NOREF1(pSrcPos);
+
+    /* Keep the structure alive while we're inside KeWaitForSingleObject. */
+    rtR0SemEventMultiNtRetain(pThis);
+
+    /*
+     * Convert the timeout to a relative one because KeWaitForSingleObject
+     * takes system time instead of interrupt time as input for absolute
+     * timeout specifications. So, we're best of by giving it relative time.
+     *
+     * Lazy bird converts uTimeout to relative nanoseconds and then to Nt time.
+     */
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+            /* ms -> ns, clamping to UINT64_MAX on multiplication overflow. */
+            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
+                     ? uTimeout * UINT32_C(1000000)
+                     : UINT64_MAX;
+        if (uTimeout == UINT64_MAX)
+            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+        else
+        {
+            if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
+            {
+                /* Absolute deadline -> remaining time (0 if already past). */
+                uint64_t u64Now = RTTimeSystemNanoTS();
+                uTimeout = u64Now < uTimeout
+                         ? uTimeout - u64Now
+                         : 0;
+            }
+        }
+    }
+
+    /*
+     * Wait for it.
+     * We're assuming interruptible waits should happen at UserMode level.
+     */
+    NTSTATUS rcNt;
+    BOOLEAN fInterruptible = !!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
+    KPROCESSOR_MODE WaitMode = fInterruptible ? UserMode : KernelMode;
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+        rcNt = KeWaitForSingleObject(&pThis->Event, Executive, WaitMode, fInterruptible, NULL);
+    else
+    {
+        LARGE_INTEGER Timeout;
+        /* Negative QuadPart = relative timeout; NT counts in 100ns units. */
+        Timeout.QuadPart = -(int64_t)(uTimeout / 100);
+        rcNt = KeWaitForSingleObject(&pThis->Event, Executive, WaitMode, fInterruptible, &Timeout);
+    }
+    int rc;
+    /* Re-check the magic: RTSemEventMultiDestroy may have run while we waited. */
+    if (pThis->u32Magic == RTSEMEVENTMULTI_MAGIC)
+    {
+        switch (rcNt)
+        {
+            case STATUS_SUCCESS:
+                rc = VINF_SUCCESS;
+                break;
+            case STATUS_ALERTED:
+                rc = VERR_INTERRUPTED;
+                break;
+            case STATUS_USER_APC:
+                rc = VERR_INTERRUPTED;
+                break;
+            case STATUS_TIMEOUT:
+                rc = VERR_TIMEOUT;
+                break;
+            default:
+                AssertMsgFailed(("pThis->u32Magic=%RX32 pThis=%p: wait returned %lx!\n",
+                                 pThis->u32Magic, pThis, (long)rcNt));
+                rc = VERR_INTERNAL_ERROR_4;
+                break;
+        }
+    }
+    else
+        rc = VERR_SEM_DESTROYED;
+
+    rtR0SemEventMultiNtRelease(pThis);
+    return rc;
+}
+
+
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+/* NOTE(review): deliberately keyed on RTSEMEVENT_STRICT (shared with the
+   single-release variant), not an RTSEMEVENTMULTI-specific define -- confirm. */
+#ifndef RTSEMEVENT_STRICT
+    return rtR0SemEventMultiNtWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#else
+    /* Strict builds capture the caller's source position (currently unused by the worker). */
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+    return rtR0SemEventMultiNtWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+
+
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+                                       RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    /* Debug API variant: pass the caller-supplied source position along. */
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+    return rtR0SemEventMultiNtWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+    /* Timeout resolution equals the system timer granularity. */
+    return RTTimerGetSystemGranularity();
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/semfastmutex-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/semfastmutex-r0drv-nt.cpp
new file mode 100644
index 00000000..b4b27707
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/semfastmutex-r0drv-nt.cpp
@@ -0,0 +1,138 @@
+/* $Id: semfastmutex-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+#include <iprt/semaphore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/errcore.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the linux semaphore structure.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+ /** Magic value (RTSEMFASTMUTEX_MAGIC). */
+ uint32_t u32Magic;
+ /** the NT fast mutex. */
+ FAST_MUTEX Mutex;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+ /*
+ * Allocate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis;
+ Assert(sizeof(*pThis) > sizeof(void *));
+ pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize.
+ */
+ pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+ ExInitializeFastMutex(&pThis->Mutex);
+
+ *phFastMtx = pThis;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+ /*
+ * Validate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ if (pThis == NIL_RTSEMFASTMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+ Assert(pThis->Mutex.Count == 1);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+ /*
+ * Validate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+#if 1
+ /*
+ * ExAcquireFastMutex will set the IRQL to APC regardless of our current
+ * level. Lowering the IRQL may screw things up, so do not allow this.
+ */
+# if 0 /** @todo enable this when the logger has been fixed. */
+    AssertMsgReturn(KeGetCurrentIrql() <= APC_LEVEL, ("%d\n", KeGetCurrentIrql()), VERR_INVALID_STATE);
+# else /* the gentler approach. */
+ KIRQL Irql = KeGetCurrentIrql();
+ if (Irql > APC_LEVEL)
+ return VERR_INVALID_STATE;
+# endif
+#endif
+
+ ExAcquireFastMutex(&pThis->Mutex);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+ /*
+ * Validate.
+ */
+ PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+
+ ExReleaseFastMutex(&pThis->Mutex);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/semmutex-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/semmutex-r0drv-nt.cpp
new file mode 100644
index 00000000..62f9a00a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/semmutex-r0drv-nt.cpp
@@ -0,0 +1,236 @@
+/* $Id: semmutex-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-nt-kernel.h"
+#include <iprt/semaphore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/err.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * NT mutex semaphore.
+ */
+typedef struct RTSEMMUTEXINTERNAL
+{
+ /** Magic value (RTSEMMUTEX_MAGIC). */
+ uint32_t volatile u32Magic;
+#ifdef RT_USE_FAST_MUTEX
+ /** The fast mutex object. */
+ FAST_MUTEX Mutex;
+#else
+ /** The NT Mutex object. */
+ KMUTEX Mutex;
+#endif
+} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMutexSem)
+{
+ return RTSemMutexCreateEx(phMutexSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
+}
+
+
+RTDECL(int) RTSemMutexCreateEx(PRTSEMMUTEX phMutexSem, uint32_t fFlags,
+ RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
+{
+ AssertReturn(!(fFlags & ~RTSEMMUTEX_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+ RT_NOREF3(hClass, uSubClass, pszNameFmt);
+
+ AssertCompile(sizeof(RTSEMMUTEXINTERNAL) > sizeof(void *));
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMMUTEX_MAGIC;
+#ifdef RT_USE_FAST_MUTEX
+ ExInitializeFastMutex(&pThis->Mutex);
+#else
+ KeInitializeMutex(&pThis->Mutex, 0);
+#endif
+
+ *phMutexSem = pThis;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
+ if (pThis == NIL_RTSEMMUTEX)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
+
+ /*
+ * Invalidate it and signal the object just in case.
+ */
+ AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Internal worker for RTSemMutexRequest and RTSemMutexRequestNoResume
+ *
+ * @returns IPRT status code.
+ * @param hMutexSem The mutex handle.
+ * @param cMillies The timeout.
+ * @param fInterruptible Whether it's interruptible
+ * (RTSemMutexRequestNoResume) or not
+ * (RTSemMutexRequest).
+ */
+static int rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, BOOLEAN fInterruptible)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
+
+ /*
+ * Get the mutex.
+ */
+#ifdef RT_USE_FAST_MUTEX
+ AssertMsg(cMillies == RT_INDEFINITE_WAIT, ("timeouts are not supported when using fast mutexes!\n"));
+ ExAcquireFastMutex(&pThis->Mutex);
+ return VINF_SUCCESS;
+
+#else /* !RT_USE_FAST_MUTEX */
+ NTSTATUS rcNt;
+ if (cMillies == RT_INDEFINITE_WAIT)
+ rcNt = KeWaitForSingleObject(&pThis->Mutex, Executive, KernelMode, fInterruptible, NULL);
+ else
+ {
+ LARGE_INTEGER Timeout;
+ Timeout.QuadPart = -(int64_t)cMillies * 10000;
+ rcNt = KeWaitForSingleObject(&pThis->Mutex, Executive, KernelMode, fInterruptible, &Timeout);
+ }
+ switch (rcNt)
+ {
+ case STATUS_SUCCESS:
+ if (pThis->u32Magic == RTSEMMUTEX_MAGIC)
+ return VINF_SUCCESS;
+ return VERR_SEM_DESTROYED;
+
+ case STATUS_ALERTED:
+ case STATUS_USER_APC:
+ Assert(fInterruptible);
+ return VERR_INTERRUPTED;
+
+ case STATUS_TIMEOUT:
+ return VERR_TIMEOUT;
+
+ default:
+ AssertMsgFailed(("pThis->u32Magic=%RX32 pThis=%p: wait returned %lx!\n",
+ pThis->u32Magic, pThis, (long)rcNt));
+ return VERR_INTERNAL_ERROR;
+ }
+#endif /* !RT_USE_FAST_MUTEX */
+}
+
+
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexRequest(hMutexSem, cMillies, FALSE /*fInterruptible*/);
+}
+
+
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RT_NOREF1(uId); RT_SRC_POS_NOREF();
+ return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexRequest(hMutexSem, cMillies, TRUE /*fInterruptible*/);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RT_NOREF1(uId); RT_SRC_POS_NOREF();
+ return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
+
+ /*
+ * Release the mutex.
+ */
+#ifdef RT_USE_FAST_MUTEX
+ ExReleaseFastMutex(&pThis->Mutex);
+#else
+ KeReleaseMutex(&pThis->Mutex, FALSE /*Wait*/);
+#endif
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+ /*
+ * Validate.
+ */
+ RTSEMMUTEXINTERNAL *pThis = hMutexSem;
+ AssertPtrReturn(pThis, false);
+ AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, false);
+
+#ifdef RT_USE_FAST_MUTEX
+    return pThis->Mutex.Owner != NULL;
+#else
+ return KeReadStateMutex(&pThis->Mutex) == 1;
+#endif
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/spinlock-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/spinlock-r0drv-nt.cpp
new file mode 100644
index 00000000..75ce49bc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/spinlock-r0drv-nt.cpp
@@ -0,0 +1,197 @@
+/* $Id: spinlock-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/spinlock.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Apply the NoIrq hack if defined. */
+#define RTSPINLOCK_NT_HACK_NOIRQ
+
+#ifdef RTSPINLOCK_NT_HACK_NOIRQ
+/** Indicates that the spinlock is taken. */
+# define RTSPINLOCK_NT_HACK_NOIRQ_TAKEN UINT32_C(0x00c0ffee)
+/** Indicates that the spinlock is free. */
+# define RTSPINLOCK_NT_HACK_NOIRQ_FREE UINT32_C(0xfe0000fe)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the KSPIN_LOCK type.
+ */
+typedef struct RTSPINLOCKINTERNAL
+{
+ /** Spinlock magic value (RTSPINLOCK_MAGIC). */
+ uint32_t volatile u32Magic;
+#ifdef RTSPINLOCK_NT_HACK_NOIRQ
+ /** Spinlock hack. */
+ uint32_t volatile u32Hack;
+#endif
+ /** The saved IRQL. */
+ KIRQL volatile SavedIrql;
+ /** The saved interrupt flag. */
+ RTCCUINTREG volatile fIntSaved;
+ /** The spinlock creation flags. */
+ uint32_t fFlags;
+ /** The NT spinlock structure. */
+ KSPIN_LOCK Spinlock;
+} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+ AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+ RT_NOREF1(pszName);
+
+ /*
+ * Allocate.
+ */
+ Assert(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize & return.
+ */
+ pThis->u32Magic = RTSPINLOCK_MAGIC;
+#ifdef RTSPINLOCK_NT_HACK_NOIRQ
+ pThis->u32Hack = RTSPINLOCK_NT_HACK_NOIRQ_FREE;
+#endif
+ pThis->SavedIrql = 0;
+ pThis->fIntSaved = 0;
+ pThis->fFlags = fFlags;
+ KeInitializeSpinLock(&pThis->Spinlock);
+
+ *pSpinlock = pThis;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+ /*
+ * Validate input.
+ */
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ if (pThis->u32Magic != RTSPINLOCK_MAGIC)
+ {
+ AssertMsgFailed(("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ ASMAtomicIncU32(&pThis->u32Magic);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC, ("magic=%#x\n", pThis->u32Magic));
+
+ KIRQL SavedIrql;
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+#ifndef RTSPINLOCK_NT_HACK_NOIRQ
+ RTCCUINTREG fIntSaved = ASMGetFlags();
+ ASMIntDisable();
+ KeAcquireSpinLock(&pThis->Spinlock, &SavedIrql);
+#else
+ SavedIrql = KeGetCurrentIrql();
+ if (SavedIrql < DISPATCH_LEVEL)
+ {
+ KeRaiseIrql(DISPATCH_LEVEL, &SavedIrql);
+ Assert(SavedIrql < DISPATCH_LEVEL);
+ }
+ RTCCUINTREG fIntSaved = ASMGetFlags();
+ ASMIntDisable();
+
+ if (!ASMAtomicCmpXchgU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_TAKEN, RTSPINLOCK_NT_HACK_NOIRQ_FREE))
+ {
+ while (!ASMAtomicCmpXchgU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_TAKEN, RTSPINLOCK_NT_HACK_NOIRQ_FREE))
+ ASMNopPause();
+ }
+#endif
+ pThis->fIntSaved = fIntSaved;
+ }
+ else
+ KeAcquireSpinLock(&pThis->Spinlock, &SavedIrql);
+ pThis->SavedIrql = SavedIrql;
+}
+
+
+RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC, ("magic=%#x\n", pThis->u32Magic));
+
+ KIRQL SavedIrql = pThis->SavedIrql;
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+ RTCCUINTREG fIntSaved = pThis->fIntSaved;
+ pThis->fIntSaved = 0;
+
+#ifndef RTSPINLOCK_NT_HACK_NOIRQ
+ KeReleaseSpinLock(&pThis->Spinlock, SavedIrql);
+ ASMSetFlags(fIntSaved);
+#else
+ Assert(pThis->u32Hack == RTSPINLOCK_NT_HACK_NOIRQ_TAKEN);
+
+ ASMAtomicWriteU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_FREE);
+ ASMSetFlags(fIntSaved);
+ if (SavedIrql < DISPATCH_LEVEL)
+ KeLowerIrql(SavedIrql);
+#endif
+ }
+ else
+ KeReleaseSpinLock(&pThis->Spinlock, SavedIrql);
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/symdb.h b/src/VBox/Runtime/r0drv/nt/symdb.h
new file mode 100644
index 00000000..2fad6c3a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/symdb.h
@@ -0,0 +1,88 @@
+/* $Id: symdb.h $ */
+/** @file
+ * IPRT - Internal Header for the NT Ring-0 Driver Symbol DB.
+ */
+
+/*
+ * Copyright (C) 2013-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_nt_symdb_h
+#define IPRT_INCLUDED_SRC_r0drv_nt_symdb_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/types.h>
+
+
+/**
+ * NT Version info.
+ */
+typedef struct RTNTSDBOSVER
+{
+ /** The major version number. */
+ uint8_t uMajorVer;
+ /** The minor version number. */
+ uint8_t uMinorVer;
+ /** Set if checked build, clear if free (retail) build. */
+ uint8_t fChecked : 1;
+ /** Set if multi processor kernel. */
+ uint8_t fSmp : 1;
+ /** The service pack number. */
+ uint8_t uCsdNo : 6;
+ /** The build number. */
+ uint32_t uBuildNo;
+} RTNTSDBOSVER;
+/** Pointer to NT version info. */
+typedef RTNTSDBOSVER *PRTNTSDBOSVER;
+/** Pointer to const NT version info. */
+typedef RTNTSDBOSVER const *PCRTNTSDBOSVER;
+
+
+/**
+ * Compare NT OS version structures.
+ *
+ * @retval 0 if equal
+ * @retval 1 if @a pInfo1 is newer/greater than @a pInfo2
+ * @retval -1 if @a pInfo1 is older/less than @a pInfo2
+ *
+ * @param pInfo1 The first version info structure.
+ * @param pInfo2 The second version info structure.
+ */
+DECLINLINE(int) rtNtOsVerInfoCompare(PCRTNTSDBOSVER pInfo1, PCRTNTSDBOSVER pInfo2)
+{
+ if (pInfo1->uMajorVer != pInfo2->uMajorVer)
+ return pInfo1->uMajorVer > pInfo2->uMajorVer ? 1 : -1;
+ if (pInfo1->uMinorVer != pInfo2->uMinorVer)
+ return pInfo1->uMinorVer > pInfo2->uMinorVer ? 1 : -1;
+ if (pInfo1->uBuildNo != pInfo2->uBuildNo)
+ return pInfo1->uBuildNo > pInfo2->uBuildNo ? 1 : -1;
+ if (pInfo1->uCsdNo != pInfo2->uCsdNo)
+ return pInfo1->uCsdNo > pInfo2->uCsdNo ? 1 : -1;
+ if (pInfo1->fSmp != pInfo2->fSmp)
+ return pInfo1->fSmp > pInfo2->fSmp ? 1 : -1;
+ if (pInfo1->fChecked != pInfo2->fChecked)
+ return pInfo1->fChecked > pInfo2->fChecked ? 1 : -1;
+ return 0;
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_nt_symdb_h */
+
diff --git a/src/VBox/Runtime/r0drv/nt/symdbdata.h b/src/VBox/Runtime/r0drv/nt/symdbdata.h
new file mode 100644
index 00000000..ab06fdde
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/symdbdata.h
@@ -0,0 +1,2988 @@
+/* $Id: symdbdata.h $ */
+/** @file
+ * IPRT - NT kernel type helpers - Autogenerated, do NOT edit.
+ */
+
+/*
+ * Copyright (C) 2013-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_nt_symdbdata_h
+#define IPRT_INCLUDED_SRC_r0drv_nt_symdbdata_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "r0drv/nt/symdb.h"
+
+typedef struct RTNTSDBTYPE_KPRCB
+{
+ uint32_t offQuantumEnd;
+ uint32_t cbQuantumEnd;
+ uint32_t offDpcQueueDepth;
+ uint32_t cbDpcQueueDepth;
+ uint32_t offVendorString;
+ uint32_t cbVendorString;
+} RTNTSDBTYPE_KPRCB;
+
+
+typedef struct RTNTSDBSET
+{
+ RTNTSDBOSVER OsVerInfo;
+ RTNTSDBTYPE_KPRCB KPRCB;
+} RTNTSDBSET;
+typedef RTNTSDBSET const *PCRTNTSDBSET;
+
+
+#ifndef RTNTSDB_NO_DATA
+const RTNTSDBSET g_artNtSdbSets[] =
+{
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_nec98\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en_chk\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en_chk\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_nec98\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_nec98\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en_chk\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp3sym_en_chk\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-x86-Symbols-ENU\symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-x86-Symbols-ENU\symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_nec98\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-nec98-Symbols-JPN\symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-nec98-Symbols-JPN\symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en_chk\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en_chk\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-x86-Symbols-ENU\symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-x86-Symbols-ENU\symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_nec98\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_nec98\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-nec98-Symbols-JPN\symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\Windows2000-KB891861-nec98-Symbols-JPN\symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en_chk\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\uold\w2ksp4sym_en_chk\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 4,
+ /* .uBuildNo = */ 2195,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0750,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x06e8,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x072d,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.fre.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.fre.rtm.symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.chk.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.chk.rtm.symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.fre.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.fre.rtm.symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.chk.rtm.symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\windowsxp.x86.chk.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86_chk\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86_chk\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86_chk\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\xpsp1sym_x86_chk\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-slp-Symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-slp-Symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-Debug-slp-Symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-Debug-slp-Symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-slp-Symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-slp-Symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-Debug-slp-Symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB835935-SP2-Debug-slp-Symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-symbols-full-ENU\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-symbols-full-ENU\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-DEBUG-symbols-full-ENU-DEBUG\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-DEBUG-symbols-full-ENU-DEBUG\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-symbols-full-ENU\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-symbols-full-ENU\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-DEBUG-symbols-full-ENU-DEBUG\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsXP-KB936929-SP3-x86-DEBUG-symbols-full-ENU-DEBUG\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 3,
+ /* .uBuildNo = */ 2600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x088c,
+ /* .cbQuantumEnd = */ 0x0004,
+ /* .offDpcQueueDepth = */ 0x0870,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0900,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.fre.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.fre.rtm.symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.chk.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.chk.rtm.symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.fre.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.fre.rtm.symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.chk.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003.x86.chk.rtm.symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x08c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x086c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0a78,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x64-symbols-NRL-ENU\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.fre.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.fre.rtm.symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.amd64.fre.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU-DEBUG\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU-DEBUG\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x64-symbols-NRL-ENU-DEBUG\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.chk.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.chk.rtm.symbols\exe\ntkrnlpa.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.amd64.chk.rtm.symbols\exe\ntoskrnl.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ false,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x64-symbols-NRL-ENU\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.fre.rtm.symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.fre.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.amd64.fre.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU-DEBUG\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x86-symbols-NRL-ENU-DEBUG\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsServer2003-KB933548-v1-x64-symbols-NRL-ENU-DEBUG\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.chk.rtm.symbols\exe\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.x86.chk.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x0981,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x092c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x0b60,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows2003_sp1.amd64.chk.rtm.symbols\exe\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 5,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 3790,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1f75,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x1f18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x22b4,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6000.061101-2205.x86fre.Symbols\EXE\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 6000,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x19c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x196c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1bac,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6000.061101-2205.x86fre.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 6000,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x19c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x196c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1bac,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\WindowsVista.6000.061101-2205.amd64fre.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 6000,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x3375,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x3318,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x38bc,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6000.061101-2205.x86chk.Symbols\EXE\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 6000,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x19c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x196c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1bac,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6000.061101-2205.x86chk.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 6000,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x19c1,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x196c,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1bac,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\WindowsVista.6000.061101-2205.amd64chk.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 6000,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x3375,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x3318,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x38bc,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Longhorn.6001.080118-1840.x86fre.Symbols\EXE\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 6001,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Longhorn.6001.080118-1840.x86fre.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 6001,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Longhorn.6001.080118-1840.amd64fre.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 6001,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x3475,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x3418,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x399c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Longhorn.6001.080118-1840.x86chk.Symbols\EXE\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 6001,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Longhorn.6001.080118-1840.x86chk.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 6001,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Longhorn.6001.080118-1840.amd64chk.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 6001,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x3475,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x3418,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x399c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6002.090410-1830.x86fre.Symbols\EXE\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 6002,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6002.090410-1830.x86fre.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 6002,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\WindowsVista.6002.090410-1830.amd64fre.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 6002,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x3475,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x3418,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x399c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6002.090410-1830.x86chk.Symbols\EXE\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 6002,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\WindowsVista.6002.090410-1830.x86chk.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 6002,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1a41,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x19ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x1c2c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\WindowsVista.6002.090410-1830.amd64chk.Symbols\EXE\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 2,
+ /* .uBuildNo = */ 6002,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x3475,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x3418,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x399c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7.7600.16385.090713-1255.X86FRE.Symbols\Symbols\ntkrpamp.pdb\5B308B4ED6464159B87117C711E7340C2\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 7600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7.7600.16385.090713-1255.X86FRE.Symbols\Symbols\ntkrnlmp.pdb\998A3472EEA6405CB8C089DE868F26222\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 7600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Win7.7600.16385.090713-1255.X64FRE.Symbols\Symbols\ntkrnlmp.pdb\F8E2A8B5C9B74BF4A6E4A48F180099942\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 7600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x21d9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2198,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x4bb8,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7.7600.16385.090713-1255.X86CHK.Symbols\Symbols\ntkrnlmp.pdb\9E7882E37C3E4AC9BB60F4EAD9DB492A1\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 7600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7.7600.16385.090713-1255.X86CHK.Symbols\Symbols\ntkrpamp.pdb\3269AC66C11B41FC995991F129E95D5C1\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 7600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Win7.7600.16385.090713-1255.X64CHK.Symbols\Symbols\ntkrnlmp.pdb\C491E3167994497FA91338D08A7787041\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 7600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x21d9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2198,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x4bb8,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7SP1.7601.17514.101119-1850.X86FRE.Symbols\Symbols\ntkrpamp.pdb\684DA42A30CC450F81C535B4D18944B12\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 7601,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7SP1.7601.17514.101119-1850.X86FRE.Symbols\Symbols\ntkrnlmp.pdb\00625D7D36754CBEBA4533BA9A0F3FE22\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 7601,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Win7SP1.7601.17514.101119-1850.AMD64FRE.Symbols\Symbols\ntkrnlmp.pdb\3844DBB920174967BE7AA4A2C20430FA2\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 7601,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x21d9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2198,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x4bb8,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7SP1.7601.17514.101119-1850.X86CHK.Symbols\Symbols\ntkrpamp.pdb\C3355A163C47464183D85DE0B836F83A1\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 7601,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win7SP1.7601.17514.101119-1850.X86CHK.Symbols\Symbols\ntkrnlmp.pdb\1477BEA3E003427CB248D5233B0601951\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 7601,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x1931,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x18ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x336c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Win7SP1.7601.17514.101119-1850.AMD64CHK.Symbols\Symbols\ntkrnlmp.pdb\FF0DE75C807A4B85A7668D2113A62EF11\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 1,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 1,
+ /* .uBuildNo = */ 7601,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x21d9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2198,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x4bb8,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win8.9200.16384.120725-1247.X86FRE.Symbols\Symbols\ntkrpamp.pdb\E2342527EA214C109CD28A19ED4FBCCE2\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9200,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2231,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x21ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x3c7c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Win8.9200.16384.120725-1247.x64FRE.Symbols\Symbols\ntkrnlmp.pdb\724821001C1C4A03AED8C4C71C2E8D1D2\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9200,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2dd9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2d98,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x5948,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\Windows_Win8.9200.16384.120725-1247.X86CHK.Symbols\Symbols\ntkrpamp.pdb\C4F414C9D1854DE495BDAD814A722C4D1\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9200,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2231,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x21ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x3c7c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\Windows_Win8.9200.16384.120725-1247.x64CHK.Symbols\Symbols\ntkrnlmp.pdb\FC0361C3243D459496EE02EF1A7ACD271\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 2,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9200,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2dd9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2d98,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x5948,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\en_windows_8_1_symbols_x86_2712593\ntkrpamp.pdb\9DC1F995475C456C8D1AA9606E3106931\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 3,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2239,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x21ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x3c7c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\en_windows_8_1_symbols_x64_2712576\ntkrnlmp.pdb\A9BBA3C139724A738BE17665DB4393CA1\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 3,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2de9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2d98,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x5958,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\en_windows_8_1_symbols_debug_checked_x86_2712583\ntkrpamp.pdb\77DAB075113647B5888133D3F79B7B171\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 3,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2239,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x21ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x3c7c,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\en_windows_8_1_symbols_debug_checked_x64_2712568\ntkrnlmp.pdb\4C5FFE3E839647C5B9471D0C8F9710E11\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 6,
+ /* .uMinorVer = */ 3,
+ /* .fChecked = */ true,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 9600,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2de9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2d98,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x5958,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_X86
+ { /* Source: s:\WinSyms\u\en_windows_10_symbols_x86_6903197\ntkrpamp.pdb\3A07902D18FD40CE929445D1777703241\ntkrpamp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 10,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 10240,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2239,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x21ec,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x3cfc,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\en_windows_10_symbols_x64_6903177\ntkrnlmp.pdb\C68EE22FDCF6477895C54A862BE165671\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 10,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 10240,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2de9,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2d98,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x6258,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+# ifdef RT_ARCH_AMD64
+ { /* Source: s:\WinSyms\u\en_windows_10_17134_x64_symserv\ntkrnlmp.pdb\1E2C949B928244638C2A7406B9F3824A1\ntkrnlmp.pdb */
+ /*.OsVerInfo = */
+ {
+ /* .uMajorVer = */ 10,
+ /* .uMinorVer = */ 0,
+ /* .fChecked = */ false,
+ /* .fSmp = */ true,
+ /* .uCsdNo = */ 0,
+ /* .uBuildNo = */ 17134,
+ },
+ /* .KPRCB = */
+ {
+ /* .offQuantumEnd = */ 0x2e69,
+ /* .cbQuantumEnd = */ 0x0001,
+ /* .offDpcQueueDepth = */ 0x2e18,
+ /* .cbDpcQueueDepth = */ 0x0004,
+ /* .offVendorString = */ 0x6290,
+ /* .cbVendorString = */ 0x000d,
+ },
+ },
+# endif
+};
+#endif /* !RTNTSDB_NO_DATA */
+
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_nt_symdbdata_h */
+
diff --git a/src/VBox/Runtime/r0drv/nt/the-nt-kernel.h b/src/VBox/Runtime/r0drv/nt/the-nt-kernel.h
new file mode 100644
index 00000000..a3e319c1
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/the-nt-kernel.h
@@ -0,0 +1,89 @@
+/* $Id: the-nt-kernel.h $ */
+/** @file
+ * IPRT - Include all necessary headers for the NT kernel.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_nt_the_nt_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_nt_the_nt_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/cdefs.h>
+
+/* On x86 the interlocked intrinsics cause trouble with the DDK headers, so force them off. */
+#if defined(RT_ARCH_X86) && !defined(NO_INTERLOCKED_INTRINSICS)
+# define NO_INTERLOCKED_INTRINSICS /* avoid trouble */
+#endif
+/*
+ * VC++ 2005 and later declare the _Interlocked* compiler intrinsics in a way that
+ * clashes with unpatched DDK headers.  Rename the DDK declarations out of the way
+ * while including <iprt/nt/nt.h>, then undo the renaming again afterwards.
+ */
+#if (_MSC_VER >= 1400) && !defined(VBOX_WITH_PATCHED_DDK)
+# include <iprt/asm.h>
+# define _InterlockedExchange _InterlockedExchange_StupidDDKVsCompilerCrap
+# define _InterlockedExchangeAdd _InterlockedExchangeAdd_StupidDDKVsCompilerCrap
+# define _InterlockedCompareExchange _InterlockedCompareExchange_StupidDDKVsCompilerCrap
+# define _InterlockedAddLargeStatistic _InterlockedAddLargeStatistic_StupidDDKVsCompilerCrap
+# pragma warning(disable : 4163) /* C4163: 'x' not available as an intrinsic function */
+RT_C_DECLS_BEGIN
+# include <iprt/nt/nt.h>
+RT_C_DECLS_END
+# pragma warning(default : 4163)
+# undef _InterlockedExchange
+# undef _InterlockedExchangeAdd
+# undef _InterlockedCompareExchange
+# undef _InterlockedAddLargeStatistic
+#else
+RT_C_DECLS_BEGIN
+# include <iprt/nt/nt.h>
+RT_C_DECLS_END
+#endif
+
+#include <memory.h>
+#if !defined(RT_OS_WINDOWS)
+# error "RT_OS_WINDOWS must be defined!"
+#endif
+
+#include <iprt/param.h>
+#ifndef PAGE_OFFSET_MASK
+# define PAGE_OFFSET_MASK (PAGE_SIZE - 1)
+#endif
+
+/* Missing if we're compiling against older WDKs. */
+#ifndef NonPagedPoolNx
+# define NonPagedPoolNx ((POOL_TYPE)512)
+#endif
+
+/*
+ * When targeting NT4 we have to undo some of the nice macros
+ * installed by the later DDKs.
+ */
+#undef ExAllocatePool
+#undef ExFreePool
+
+/** @def IPRT_NT_POOL_TAG
+ * Tag to use with the NT Pool APIs.
+ * In memory and in the various windbg tools it appears in the reverse order of
+ * what it is given as here, so it'll read "IPRT".
+ */
+#define IPRT_NT_POOL_TAG 'TRPI'
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_nt_the_nt_kernel_h */
+
diff --git a/src/VBox/Runtime/r0drv/nt/thread-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/thread-r0drv-nt.cpp
new file mode 100644
index 00000000..a5acd905
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/thread-r0drv-nt.cpp
@@ -0,0 +1,228 @@
+/* $Id: thread-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Threads, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+#include "internal-r0drv-nt.h"
+
+
+
+/**
+ * Gets the native (NT) handle of the calling thread.
+ *
+ * @returns The current PETHREAD cast to RTNATIVETHREAD.
+ */
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+    return (RTNATIVETHREAD)PsGetCurrentThread();
+}
+
+
+/**
+ * Common worker for RTThreadSleep: alertable delay of the calling thread.
+ *
+ * @returns VINF_SUCCESS on normal timeout, VERR_INTERRUPTED if the wait was
+ *          broken by an alert or a user APC, otherwise the NT status
+ *          converted to an IPRT status code.
+ * @param   cMillies    Number of milliseconds to sleep.
+ */
+static int rtR0ThreadNtSleepCommon(RTMSINTERVAL cMillies)
+{
+    LARGE_INTEGER Interval;
+    Interval.QuadPart = -(int64_t)cMillies * 10000; /* Negative value == relative time, in 100ns units. */
+    NTSTATUS rcNt = KeDelayExecutionThread(KernelMode, TRUE /* Alertable */, &Interval);
+    switch (rcNt)
+    {
+        case STATUS_SUCCESS:
+            return VINF_SUCCESS;
+        case STATUS_ALERTED:
+        case STATUS_USER_APC:
+            return VERR_INTERRUPTED;
+        default:
+            return RTErrConvertFromNtStatus(rcNt);
+    }
+}
+
+
+/**
+ * Puts the calling thread to sleep (alertable).
+ *
+ * @returns See rtR0ThreadNtSleepCommon.
+ * @param   cMillies    Number of milliseconds to sleep.
+ */
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+    return rtR0ThreadNtSleepCommon(cMillies);
+}
+
+
+/**
+ * Yields the processor to another ready thread.
+ *
+ * @returns true if the system performed a yield, false if not.
+ */
+RTDECL(bool) RTThreadYield(void)
+{
+    return ZwYieldExecution() != STATUS_NO_YIELD_PERFORMED;
+}
+
+
+/**
+ * Checks whether preemption is currently enabled for the calling thread.
+ *
+ * Preemption is considered disabled when running at IRQL above APC_LEVEL or
+ * when interrupts are disabled on the current CPU.
+ */
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); RT_NOREF1(hThread);
+    KIRQL Irql = KeGetCurrentIrql();
+    if (Irql > APC_LEVEL)
+        return false;
+    if (!ASMIntAreEnabled())
+        return false;
+    return true;
+}
+
+
+/**
+ * Checks whether preemption of the calling thread is pending.
+ *
+ * Uses KeShouldYieldProcessor when available (Windows 10+); otherwise falls
+ * back on inspecting KPRCB fields via offsets resolved at init time (the
+ * g_offrtNtPb* / g_cbrtNtPb* globals).  May return false when the required
+ * information is unavailable, i.e. this is best effort — see
+ * RTThreadPreemptIsPendingTrusty.
+ */
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); RT_NOREF1(hThread);
+
+    /*
+     * The KeShouldYieldProcessor API introduced in Windows 10 looks like exactly
+     * what we want. But of course there is a snag. It may return with interrupts
+     * enabled when called with them disabled. Let's just hope it doesn't get upset
+     * by disabled interrupts in other ways...
+     */
+    if (g_pfnrtKeShouldYieldProcessor)
+    {
+        RTCCUINTREG fSavedFlags = ASMGetFlags();
+        bool fReturn = g_pfnrtKeShouldYieldProcessor() != FALSE;
+        ASMSetFlags(fSavedFlags); /* Restore the interrupt flag in case the API enabled it. */
+        return fReturn;
+    }
+
+    /*
+     * Fallback approach for pre W10 kernels.
+     *
+     * If W10 is anything to go by, we should also check and yield when:
+     *  - pPrcb->NextThread != NULL && pPrcb->NextThread != pPrcb->CurrentThread
+     *    when QuantumEnd is zero.
+     *  - pPrcb->DpcRequestSummary & 1
+     *  - pPrcb->DpcRequestSummary & 0x1e
+     */
+
+    /*
+     * Read the globals and check if they are useful.
+     */
+/** @todo Should we check KPRCB.InterruptRequest and KPRCB.DpcInterruptRequested (older kernels). */
+    uint32_t const offQuantumEnd     = g_offrtNtPbQuantumEnd;
+    uint32_t const cbQuantumEnd      = g_cbrtNtPbQuantumEnd;
+    uint32_t const offDpcQueueDepth  = g_offrtNtPbDpcQueueDepth;
+    if (!offQuantumEnd && !cbQuantumEnd && !offDpcQueueDepth)
+        return false; /* No KPRCB information for this kernel build; cannot tell. */
+    Assert((offQuantumEnd && cbQuantumEnd) || (!offQuantumEnd && !cbQuantumEnd));
+
+    /*
+     * Disable interrupts so we won't be messed around.
+     */
+    bool         fPending;
+    RTCCUINTREG  fSavedFlags = ASMIntDisableFlags();
+
+#ifdef RT_ARCH_X86
+    PKPCR    pPcr   = (PKPCR)__readfsdword(RT_UOFFSETOF(KPCR,SelfPcr));
+    uint8_t *pbPrcb = (uint8_t *)pPcr->Prcb;
+
+#elif defined(RT_ARCH_AMD64)
+    /* HACK ALERT! The offset is from windbg/vista64. */
+    PKPCR    pPcr   = (PKPCR)__readgsqword(RT_UOFFSETOF(KPCR,Self));
+    uint8_t *pbPrcb = (uint8_t *)pPcr->CurrentPrcb;
+
+#else
+# error "port me"
+#endif
+
+    /* Check QuantumEnd. */
+    if (cbQuantumEnd == 1)
+    {
+        uint8_t volatile *pbQuantumEnd = (uint8_t volatile *)(pbPrcb + offQuantumEnd);
+        fPending = *pbQuantumEnd == TRUE;
+    }
+    else if (cbQuantumEnd == sizeof(uint32_t))
+    {
+        uint32_t volatile *pu32QuantumEnd = (uint32_t volatile *)(pbPrcb + offQuantumEnd);
+        fPending = *pu32QuantumEnd != 0;
+    }
+    else
+        fPending = false; /* Unknown field size; cannot interpret it. */
+
+    /* Check DpcQueueDepth. */
+    if (   !fPending
+        && offDpcQueueDepth)
+    {
+        uint32_t volatile *pu32DpcQueueDepth = (uint32_t volatile *)(pbPrcb + offDpcQueueDepth);
+        fPending = *pu32DpcQueueDepth > 0;
+    }
+
+    ASMSetFlags(fSavedFlags);
+    return fPending;
+}
+
+
+/**
+ * Indicates whether RTThreadPreemptIsPending can be trusted on this system.
+ *
+ * Only trusted when KeShouldYieldProcessor is available (Windows 10+); the
+ * KPRCB-offset fallback has proven unreliable (see the @todo below).
+ */
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+    if (g_pfnrtKeShouldYieldProcessor)
+        return true;
+#if 0 /** @todo RTThreadPreemptIsPending isn't good enough on w7 and possibly elsewhere. */
+    /* RTThreadPreemptIsPending is only reliable if we've got both offsets and size. */
+    return g_offrtNtPbQuantumEnd    != 0
+        && g_cbrtNtPbQuantumEnd     != 0
+        && g_offrtNtPbDpcQueueDepth != 0;
+#else
+    return false;
+#endif
+}
+
+
+/**
+ * Checks whether kernel preemption is possible on this system at all.
+ */
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+    /* yes, kernel preemption is possible. */
+    return true;
+}
+
+
+/**
+ * Disables preemption by raising the IRQL to DISPATCH_LEVEL.
+ *
+ * @param   pState  Preemption state to save the old IRQL in; must later be
+ *                  handed to RTThreadPreemptRestore.  uchOldIrql must be 255
+ *                  (the "unused" marker) on entry.
+ */
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+    Assert(pState->uchOldIrql == 255);
+    Assert(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+    KeRaiseIrql(DISPATCH_LEVEL, &pState->uchOldIrql);
+    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+}
+
+
+/**
+ * Re-enables preemption, lowering the IRQL to the level saved by the
+ * matching RTThreadPreemptDisable call.
+ *
+ * @param   pState  The state returned by RTThreadPreemptDisable.
+ */
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+    AssertPtr(pState);
+
+    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+    KeLowerIrql(pState->uchOldIrql);
+    pState->uchOldIrql = 255; /* Mark the state as unused again. */
+}
+
+
+/**
+ * Checks whether we're currently executing in interrupt (elevated IRQL)
+ * context.
+ */
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+
+    KIRQL CurIrql = KeGetCurrentIrql();
+    return CurIrql > PASSIVE_LEVEL; /** @todo Is there a more correct way? */
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/thread2-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/thread2-r0drv-nt.cpp
new file mode 100644
index 00000000..1eebd39d
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/thread2-r0drv-nt.cpp
@@ -0,0 +1,157 @@
+/* $Id: thread2-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/thread.h>
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+
+#include "internal/thread.h"
+
+
+/**
+ * Native (NT) init hook for the generic thread code; nothing to do here.
+ */
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+    /* No TLS in Ring-0. :-/ */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the IPRT thread handle of the calling thread, looked up by its
+ * native (PETHREAD) handle.
+ */
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+    return rtThreadGetByNative((RTNATIVETHREAD)PsGetCurrentThread());
+}
+
+
+/**
+ * Applies an IPRT thread type as an NT thread priority.
+ *
+ * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER for unknown thread types.
+ * @param   pThread     The thread; its Core.Key holds the PKTHREAD.
+ * @param   enmType     The IPRT thread type to map.
+ */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+    /*
+     * Convert the IPRT priority type to NT priority.
+     *
+     * The NT priority is in the range 0..32, with realtime starting
+     * at 16 and the default for user processes at 8. (Should try find
+     * the appropriate #defines for some of this...)
+     */
+    KPRIORITY Priority;
+    switch (enmType)
+    {
+        case RTTHREADTYPE_INFREQUENT_POLLER:    Priority = 6; break;
+        case RTTHREADTYPE_EMULATION:            Priority = 7; break;
+        case RTTHREADTYPE_DEFAULT:              Priority = 8; break;
+        case RTTHREADTYPE_MSG_PUMP:             Priority = 9; break;
+        case RTTHREADTYPE_IO:                   Priority = LOW_REALTIME_PRIORITY; break;
+        case RTTHREADTYPE_TIMER:                Priority = MAXIMUM_PRIORITY; break;
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", enmType));
+            return VERR_INVALID_PARAMETER;
+    }
+
+    /*
+     * Do the actual modification.
+     */
+    /*KPRIORITY oldPririty = */KeSetPriorityThread((PKTHREAD)pThread->Core.Key, Priority);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Adopting foreign (non-IPRT) threads is not implemented on NT.
+ */
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+    RT_NOREF1(pThread);
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Waits (non-alertable) for the native thread object to terminate.
+ *
+ * @param   pThread     The thread; its Core.Key holds the thread object.
+ */
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+    PVOID pvThreadObj = pThread->Core.Key;
+    NTSTATUS rcNt = KeWaitForSingleObject(pvThreadObj, Executive, KernelMode, FALSE, NULL);
+    AssertMsg(rcNt == STATUS_SUCCESS, ("rcNt=%#x\n", rcNt)); RT_NOREF1(rcNt);
+}
+
+
+/**
+ * Native thread destruction hook; nothing to clean up on NT (the thread
+ * dereferences its own object in rtThreadNativeMain).
+ */
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+}
+
+
+/**
+ * Native kernel thread wrapper function.
+ *
+ * This will forward to rtThreadMain and do termination upon return.
+ *
+ * @param pvArg Pointer to the argument package (PRTTHREADINT).
+ */
+static VOID rtThreadNativeMain(PVOID pvArg)
+{
+    PETHREAD Self = PsGetCurrentThread();
+    PRTTHREADINT pThread = (PRTTHREADINT)pvArg;
+
+    rtThreadMain(pThread, (RTNATIVETHREAD)Self, &pThread->szName[0]);
+
+    /* Release the thread object reference taken in rtThreadNativeCreate. */
+    ObDereferenceObject(Self);
+}
+
+
+/**
+ * Creates a native (NT system) thread executing rtThreadNativeMain.
+ *
+ * @returns IPRT status code converted from the NT status.
+ * @param   pThreadInt      The thread data; passed as the start-routine
+ *                          argument to the new thread.
+ * @param   pNativeThread   Where to return the native thread "handle", which
+ *                          is really the referenced thread object pointer.
+ */
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+    /*
+     * PsCreateSystemThread creates a thread and gives us a handle in return.
+     * We request the object for that handle and then close it, so what
+     * we keep around is the pointer to the thread object and not a handle.
+     * The thread will dereference the object before returning.
+     */
+    HANDLE hThread = NULL;
+    OBJECT_ATTRIBUTES ObjAttr;
+    InitializeObjectAttributes(&ObjAttr, NULL, OBJ_KERNEL_HANDLE, NULL, NULL);
+    NTSTATUS rc = PsCreateSystemThread(&hThread,
+                                       THREAD_ALL_ACCESS,
+                                       &ObjAttr,
+                                       NULL /* ProcessHandle - kernel */,
+                                       NULL /* ClientID - kernel */,
+                                       rtThreadNativeMain,
+                                       pThreadInt);
+    if (NT_SUCCESS(rc))
+    {
+        PVOID pvThreadObj;
+        rc = ObReferenceObjectByHandle(hThread, THREAD_ALL_ACCESS, NULL /* object type */,
+                                       KernelMode, &pvThreadObj, NULL /* handle info */);
+        if (NT_SUCCESS(rc))
+            *pNativeThread = (RTNATIVETHREAD)pvThreadObj;
+        else
+            AssertMsgFailed(("%#x\n", rc));
+        ZwClose(hThread); /* Close on both paths; previously the handle was leaked on ObReferenceObjectByHandle failure. */
+    }
+    return RTErrConvertFromNtStatus(rc);
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/time-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/time-r0drv-nt.cpp
new file mode 100644
index 00000000..29d12657
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/time-r0drv-nt.cpp
@@ -0,0 +1,149 @@
+/* $Id: time-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, Nt.
+ */
+
+/*
+ * Copyright (C) 2007-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_TIME
+#include "the-nt-kernel.h"
+#include "internal-r0drv-nt.h"
+#include <iprt/time.h>
+
+
+/*
+ * The KeQueryTickCount macro isn't compatible with NT 3.1, use the
+ * exported KPI instead.
+ */
+#ifdef RT_ARCH_X86
+# undef KeQueryTickCount
+extern "C" NTKERNELAPI void NTAPI KeQueryTickCount(PLARGE_INTEGER);
+#endif
+
+
+/**
+ * Worker for all the time getters: returns the system time in nanoseconds,
+ * based on the NT interrupt time (with tick-count fallbacks for very old
+ * kernels).
+ */
+DECLINLINE(uint64_t) rtTimeGetSystemNanoTS(void)
+{
+    /*
+     * Note! The time source we use here must be exactly the same as in
+     *       the ring-3 code!
+     *
+     * Using interrupt time is the simplest and requires the least calculation.
+     * It is also accounting for suspended time. Unfortunately, there is no
+     * ring-3 API for reading it... but that won't stop us.
+     *
+     * Using the tick count is problematic in ring-3 on older windows versions
+     * as we can only get the 32-bit tick value, i.e. we'll roll over sooner or
+     * later.
+     */
+#if 1
+    /* Interrupt time. */
+    LARGE_INTEGER InterruptTime;
+    if (g_pfnrtKeQueryInterruptTimePrecise)
+    {
+        ULONG64 QpcTsIgnored;
+        InterruptTime.QuadPart = g_pfnrtKeQueryInterruptTimePrecise(&QpcTsIgnored);
+    }
+# ifdef RT_ARCH_X86
+    else if (g_pfnrtKeQueryInterruptTime) /* W2K+ */
+        InterruptTime.QuadPart = g_pfnrtKeQueryInterruptTime();
+    else if (g_uRtNtVersion >= RTNT_MAKE_VERSION(3, 50))
+    {
+        /* NT 3.50 and later, also pre-init: Use the user shared data. */
+        /* Retry loop guards against the high part changing mid-read (no 64-bit atomic read here). */
+        do
+        {
+            InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
+            InterruptTime.LowPart  = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
+        } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != InterruptTime.HighPart);
+    }
+    else
+    {
+        /*
+         * There is no KUSER_SHARED_DATA structure on NT 3.1, so we have no choice
+         * but to use the tick count. We must also avoid the KeQueryTickCount macro
+         * in the WDK, since NT 3.1 does not have the KeTickCount data export (see above).
+         */
+        static ULONG volatile s_uTimeIncrement = 0;
+        ULONG uTimeIncrement = s_uTimeIncrement;
+        if (!uTimeIncrement)
+        {
+            uTimeIncrement = KeQueryTimeIncrement();
+            Assert(uTimeIncrement != 0);
+            Assert(uTimeIncrement * 100 / 100 == uTimeIncrement); /* 100ns units -> ns must not overflow. */
+            uTimeIncrement *= 100;
+            s_uTimeIncrement = uTimeIncrement;
+        }
+
+        KeQueryTickCount(&InterruptTime);
+        return (uint64_t)InterruptTime.QuadPart * uTimeIncrement;
+    }
+# else
+    else
+        InterruptTime.QuadPart = KeQueryInterruptTime(); /* Macro on AMD64. */
+# endif
+    return (uint64_t)InterruptTime.QuadPart * 100; /* 100ns NT units -> nanoseconds. */
+#else
+    /* Tick count. Works all the way back to NT 3.1 with #undef above. */
+    LARGE_INTEGER Tick;
+    KeQueryTickCount(&Tick);
+    return (uint64_t)Tick.QuadPart * KeQueryTimeIncrement() * 100;
+#endif
+}
+
+
+/** Returns the current time in nanoseconds (interrupt-time based). */
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+    return rtTimeGetSystemNanoTS();
+}
+
+
+/** Returns the current time in milliseconds (interrupt-time based). */
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+    return rtTimeGetSystemNanoTS() / RT_NS_1MS;
+}
+
+
+/** Same as RTTimeNanoTS on NT: nanosecond system timestamp. */
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+    return rtTimeGetSystemNanoTS();
+}
+
+
+/** Same as RTTimeMilliTS on NT: millisecond system timestamp. */
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+    return rtTimeGetSystemNanoTS() / RT_NS_1MS;
+}
+
+
+/**
+ * Gets the current wall-clock time.
+ *
+ * @returns pTime for convenience.
+ * @param   pTime   Where to store the time, set from the NT system time.
+ */
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+    LARGE_INTEGER SystemTime;
+    if (g_pfnrtKeQuerySystemTimePrecise)
+        g_pfnrtKeQuerySystemTimePrecise(&SystemTime);
+    else
+        KeQuerySystemTime(&SystemTime); /* Macro on AMD64, export on X86. */
+    return RTTimeSpecSetNtTime(pTime, SystemTime.QuadPart);
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp
new file mode 100644
index 00000000..001123bc
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp
@@ -0,0 +1,597 @@
+/* $Id: timer-r0drv-nt.cpp $ */
+/** @file
+ * IPRT - Timers, Ring-0 Driver, NT.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-nt-kernel.h"
+
+#include <iprt/timer.h>
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+
+#include "internal-r0drv-nt.h"
+#include "internal/magics.h"
+
+/** This seems to provide better accuracy. */
+#define RTR0TIMER_NT_MANUAL_RE_ARM 1
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * A sub timer structure.
+ *
+ * This is used for keeping the per-cpu tick and DPC object.
+ */
+typedef struct RTTIMERNTSUBTIMER
+{
+    /** The tick counter. */
+    uint64_t                iTick;
+    /** Pointer to the parent timer. */
+    PRTTIMER                pParent;
+    /** Thread active executing the worker function, NIL if inactive. */
+    RTNATIVETHREAD volatile hActiveThread;
+    /** The NT DPC object. */
+    KDPC                    NtDpc;
+} RTTIMERNTSUBTIMER;
+/** Pointer to a NT sub-timer structure. */
+typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;
+
+/**
+ * The internal representation of an NT timer handle.
+ */
+typedef struct RTTIMER
+{
+    /** Magic.
+     * This is RTTIMER_MAGIC, but changes to something else before the timer
+     * is destroyed to indicate clearly that the callbacks should stop. */
+    uint32_t volatile       u32Magic;
+    /** Suspend count down for single shot omni timers. */
+    int32_t volatile        cOmniSuspendCountDown;
+    /** Flag indicating the timer is suspended. */
+    bool volatile           fSuspended;
+    /** Whether the timer must run on one specific CPU or not. */
+    bool                    fSpecificCpu;
+    /** Whether the timer must run on all CPUs or not. */
+    bool                    fOmniTimer;
+    /** The CPU it must run on if fSpecificCpu is set.
+     * The master CPU for an omni-timer. */
+    RTCPUID                 idCpu;
+    /** Callback. */
+    PFNRTTIMER              pfnTimer;
+    /** User argument. */
+    void                   *pvUser;
+    /** The timer interval. 0 if one-shot. */
+    uint64_t                u64NanoInterval;
+#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
+    /** The desired NT time of the first tick. */
+    uint64_t                uNtStartTime;
+#endif
+    /** The Nt timer object. */
+    KTIMER                  NtTimer;
+    /** The number of sub-timers. */
+    RTCPUID                 cSubTimers;
+    /** Sub-timers.
+     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
+     * an entry for all possible cpus. In that case the index will be the same as
+     * for the RTCpuSet. */
+    RTTIMERNTSUBTIMER       aSubTimers[1];
+} RTTIMER;
+
+
+#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
+/**
+ * Get current NT interrupt time (in 100ns units).
+ * @return NT interrupt time
+ */
+static uint64_t rtTimerNtQueryInterruptTime(void)
+{
+# ifdef RT_ARCH_AMD64
+    return KeQueryInterruptTime(); /* macro */
+# else
+    if (g_pfnrtKeQueryInterruptTime)
+        return g_pfnrtKeQueryInterruptTime();
+
+    /* NT4: read the user shared data; loop until the two high parts agree,
+       since there is no atomic 64-bit read here. */
+    ULARGE_INTEGER InterruptTime;
+    do
+    {
+        InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
+        InterruptTime.LowPart  = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
+    } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
+    return InterruptTime.QuadPart;
+# endif
+}
+#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
+
+
+/**
+ * Manually re-arms an interval timer.
+ *
+ * Turns out NT doesn't necessarily do a very good job at re-arming timers
+ * accurately, so with RTR0TIMER_NT_MANUAL_RE_ARM we compute each due time
+ * from the original start time.  A no-op when that define is absent.
+ *
+ * @param   pTimer      The timer.
+ * @param   iTick       The current timer tick.
+ * @param   pMasterDpc  The master DPC.
+ */
+DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick, PKDPC pMasterDpc)
+{
+#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
+    Assert(pTimer->u64NanoInterval);
+    RT_NOREF1(pMasterDpc);
+
+    /* Absolute offset (in 100ns units) of the next tick from the start time. */
+    uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
+    LARGE_INTEGER DueTime;
+    DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime; /* time elapsed since start */
+    if (DueTime.QuadPart < 0)
+        DueTime.QuadPart = 0;
+    if ((uint64_t)DueTime.QuadPart < uNtNext)
+        DueTime.QuadPart -= uNtNext; /* negative == relative due time for KeSetTimerEx */
+    else
+        DueTime.QuadPart = -2500;   /* overdue; fire in 0.25ms */
+
+    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
+#else
+    RT_NOREF3(pTimer, iTick, pMasterDpc);
+#endif
+}
+
+
+/**
+ * Timer DPC callback function for the non-omni timers.
+ *
+ * Runs the user callback unless the timer was suspended or destroyed; for
+ * interval timers it also manually re-arms the NT timer.
+ *
+ * @param   pDpc                Pointer to the DPC.
+ * @param   pvUser              Pointer to our internal timer structure.
+ * @param   SystemArgument1     Some system argument.
+ * @param   SystemArgument2     Some system argument.
+ */
+static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvUser;
+    AssertPtr(pTimer);
+#ifdef RT_STRICT
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
+#endif
+
+    /*
+     * Check that we haven't been suspended before doing the callout.
+     */
+    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
+        && pTimer->u32Magic == RTTIMER_MAGIC)
+    {
+        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());
+
+        /* One-shot timers suspend themselves before the callout. */
+        if (!pTimer->u64NanoInterval)
+            ASMAtomicWriteBool(&pTimer->fSuspended, true);
+        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
+        if (pTimer->u64NanoInterval)
+            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[0].NtDpc);
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
+
+        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
+    }
+
+    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
+}
+
+
+/**
+ * The slave DPC callback for an omni timer.
+ *
+ * Queued by the master callback on every other online CPU; only runs the
+ * user callback, never re-arms the NT timer.
+ *
+ * @param   pDpc                The DPC object.
+ * @param   pvUser              Pointer to the sub-timer.
+ * @param   SystemArgument1     Some system stuff.
+ * @param   SystemArgument2     Some system stuff.
+ */
+static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
+{
+    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
+    PRTTIMER pTimer = pSubTimer->pParent;
+
+    AssertPtr(pTimer);
+#ifdef RT_STRICT
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
+    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
+    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
+        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
+#endif
+
+    /*
+     * Check that we haven't been suspended before doing the callout.
+     */
+    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
+        && pTimer->u32Magic == RTTIMER_MAGIC)
+    {
+        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());
+
+        /* Single-shot omni timer: the last CPU to finish flags the timer suspended. */
+        if (!pTimer->u64NanoInterval)
+            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
+                ASMAtomicWriteBool(&pTimer->fSuspended, true);
+
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
+
+        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
+    }
+
+    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
+}
+
+
+/**
+ * The timer callback for an omni-timer.
+ *
+ * This is responsible for queueing the DPCs for the other CPUs and
+ * perform the callback on the CPU on which it is called.
+ *
+ * @param   pDpc                The DPC object.
+ * @param   pvUser              Pointer to the sub-timer.
+ * @param   SystemArgument1     Some system stuff.
+ * @param   SystemArgument2     Some system stuff.
+ */
+static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
+{
+    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
+    PRTTIMER pTimer = pSubTimer->pParent;
+    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
+
+    AssertPtr(pTimer);
+#ifdef RT_STRICT
+    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
+        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
+    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
+        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
+#endif
+
+    /*
+     * Check that we haven't been suspended before scheduling the other DPCs
+     * and doing the callout.
+     */
+    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
+        && pTimer->u32Magic == RTTIMER_MAGIC)
+    {
+        RTCPUSET OnlineSet;
+        RTMpGetOnlineSet(&OnlineSet);
+
+        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());
+
+        if (pTimer->u64NanoInterval)
+        {
+            /*
+             * Recurring timer.
+             */
+            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
+                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
+                    && iCpuSelf != iCpu)
+                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);
+
+            uint64_t iTick = ++pSubTimer->iTick;
+            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
+            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
+        }
+        else
+        {
+            /*
+             * Single shot timers get complicated wrt fSuspended maintenance.
+             */
+            /* Count the online CPUs and pre-charge the countdown before queueing. */
+            uint32_t cCpus = 0;
+            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
+                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
+                    cCpus++;
+            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);
+
+            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
+                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
+                    && iCpuSelf != iCpu)
+                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
+                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */
+
+            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
+                ASMAtomicWriteBool(&pTimer->fSuspended, true);
+
+            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
+        }
+
+        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
+    }
+
+    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
+}
+
+
+
+/**
+ * Starts a timer.
+ *
+ * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE if already running, or
+ *          VERR_CPU_OFFLINE if a CPU-specific timer's CPU is offline.
+ * @param   pTimer      The timer to start.
+ * @param   u64First    The time (in nanoseconds) until the first tick.
+ */
+RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
+{
+    /*
+     * Validate.
+     */
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
+        return VERR_TIMER_ACTIVE;
+    if (   pTimer->fSpecificCpu
+        && !RTMpIsCpuOnline(pTimer->idCpu))
+        return VERR_CPU_OFFLINE;
+
+    /*
+     * Start the timer.
+     */
+    PKDPC pMasterDpc = pTimer->fOmniTimer
+                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
+                     : &pTimer->aSubTimers[0].NtDpc;
+
+#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
+    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
+    ULONG ulInterval = (ULONG)u64Interval;
+    if (ulInterval != u64Interval)
+        ulInterval = MAXLONG;           /* Interval too large for NT; clamp. */
+    else if (!ulInterval && pTimer->u64NanoInterval)
+        ulInterval = 1;                 /* Sub-millisecond interval; round up to 1ms. */
+#endif
+
+    LARGE_INTEGER DueTime;
+    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
+    if (!DueTime.QuadPart)
+        DueTime.QuadPart = -1; /* Zero means absolute time 0; use the smallest relative delay instead. */
+
+    /* Reset per-CPU tick counters and the suspend state before arming. */
+    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
+    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
+        pTimer->aSubTimers[iCpu].iTick = 0;
+    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
+    ASMAtomicWriteBool(&pTimer->fSuspended, false);
+#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
+    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
+    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
+#else
+    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker function that stops an active timer.
+ *
+ * Shared by RTTimerStop and RTTimerDestroy.
+ *
+ * @param pTimer The active timer.
+ */
+static void rtTimerNtStopWorker(PRTTIMER pTimer)
+{
+ /*
+ * Just cancel the timer, dequeue the DPCs and flush them (if this is supported).
+ */
+ /* Mark suspended first so DPCs that are already running can observe it. */
+ ASMAtomicWriteBool(&pTimer->fSuspended, true);
+
+ KeCancelTimer(&pTimer->NtTimer);
+
+ /* Note: KeRemoveQueueDpc only removes DPCs that are still queued; it does
+ not wait for DPCs that are currently executing (that is what the
+ KeFlushQueuedDpcs call in RTTimerDestroy is for). */
+ for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
+ KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
+}
+
+
+/**
+ * Stops an active timer.
+ *
+ * @returns IPRT status code.
+ * @retval VINF_SUCCESS on success.
+ * @retval VERR_INVALID_HANDLE if pTimer is invalid or has a bad magic.
+ * @retval VERR_TIMER_SUSPENDED if the timer is not running.
+ * @param pTimer The timer to stop.
+ */
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+ if (ASMAtomicUoReadBool(&pTimer->fSuspended))
+ return VERR_TIMER_SUSPENDED;
+
+ /*
+ * Call the worker we share with RTTimerDestroy.
+ */
+ rtTimerNtStopWorker(pTimer);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Changes the interval of a timer.
+ *
+ * Not implemented in this NT backend; the interval is fixed at creation.
+ *
+ * @returns VERR_NOT_SUPPORTED (after handle validation).
+ * @param pTimer The timer to change.
+ * @param u64NanoInterval The new interval (ignored).
+ */
+RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
+{
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+ RT_NOREF1(u64NanoInterval);
+
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Destroys a timer, stopping it first if it is active.
+ *
+ * @returns IPRT status code.
+ * @retval VINF_SUCCESS on success (also for a NULL pTimer).
+ * @retval VERR_INVALID_HANDLE on bad handle/magic.
+ * @retval VERR_INVALID_CONTEXT if not called at PASSIVE_LEVEL.
+ * @param pTimer The timer to destroy. NULL is tolerated.
+ */
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+ /* It's ok to pass NULL pointer. */
+ if (pTimer == /*NIL_RTTIMER*/ NULL)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+ /*
+ * We do not support destroying a timer from the callback because it is
+ * not 101% safe since we cannot flush DPCs. Solaris has the same restriction.
+ */
+ AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);
+
+ /*
+ * Invalidate the timer, stop it if it's running and finally
+ * free up the memory.
+ */
+ /* Invalidate the magic first so concurrent API calls fail fast. */
+ ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
+ if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
+ rtTimerNtStopWorker(pTimer);
+
+ /*
+ * Flush DPCs to be on the safe side.
+ */
+ /* KeFlushQueuedDpcs is resolved dynamically since it is not available on
+ all supported NT versions. */
+ if (g_pfnrtNtKeFlushQueuedDpcs)
+ g_pfnrtNtKeFlushQueuedDpcs();
+
+ RTMemFree(pTimer);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Creates a timer (NT ring-0 implementation).
+ *
+ * Allocates one sub-timer per possible CPU for omni ("all CPUs") timers,
+ * otherwise a single sub-timer, and initializes one NT DPC per sub-timer.
+ * The timer is created suspended; RTTimerStart arms it.
+ *
+ * @returns IPRT status code.
+ * @retval VINF_SUCCESS on success, *ppTimer set.
+ * @retval VERR_INVALID_PARAMETER on bad flags.
+ * @retval VERR_CPU_NOT_FOUND if a CPU-specific timer targets an impossible CPU.
+ * @retval VERR_NO_MEMORY on allocation failure.
+ * @param ppTimer Where to store the timer handle on success.
+ * @param u64NanoInterval The interval in nanoseconds (0 = one-shot).
+ * @param fFlags RTTIMER_FLAGS_XXX.
+ * @param pfnTimer The callback to invoke on each tick.
+ * @param pvUser User argument for the callback.
+ */
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+ *ppTimer = NULL;
+
+ /*
+ * Validate flags.
+ */
+ if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
+ return VERR_INVALID_PARAMETER;
+ if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+ && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+ && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
+ return VERR_CPU_NOT_FOUND;
+
+ /*
+ * Allocate the timer handler.
+ */
+ RTCPUID cSubTimers = 1;
+ if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
+ {
+ cSubTimers = RTMpGetMaxCpuId() + 1;
+ Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
+ }
+
+ /* Variable-size allocation: header plus cSubTimers trailing array entries. */
+ PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
+ if (!pTimer)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize it.
+ */
+ pTimer->u32Magic = RTTIMER_MAGIC;
+ pTimer->cOmniSuspendCountDown = 0;
+ pTimer->fSuspended = true;
+ pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
+ pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
+ pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
+ pTimer->cSubTimers = cSubTimers;
+ pTimer->pfnTimer = pfnTimer;
+ pTimer->pvUser = pvUser;
+ pTimer->u64NanoInterval = u64NanoInterval;
+ /* KeInitializeTimerEx (SynchronizationTimer) is preferred but resolved
+ dynamically; fall back to the plain notification timer otherwise. */
+ if (g_pfnrtKeInitializeTimerEx)
+ g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
+ else
+ KeInitializeTimer(&pTimer->NtTimer);
+ int rc = VINF_SUCCESS;
+ if (pTimer->fOmniTimer)
+ {
+ /*
+ * Initialize the per-cpu "sub-timers", select the first online cpu
+ * to be the master.
+ * ASSUMES that no cpus will ever go offline.
+ */
+ pTimer->idCpu = NIL_RTCPUID;
+ for (unsigned iCpu = 0; iCpu < cSubTimers && RT_SUCCESS(rc); iCpu++)
+ {
+ pTimer->aSubTimers[iCpu].iTick = 0;
+ pTimer->aSubTimers[iCpu].pParent = pTimer;
+
+ /* First online CPU becomes the master; its DPC re-arms the timer and
+ queues the slave DPCs on the remaining CPUs. */
+ if ( pTimer->idCpu == NIL_RTCPUID
+ && RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
+ {
+ pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
+ KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
+ }
+ else
+ KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
+ rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
+ }
+ Assert(pTimer->idCpu != NIL_RTCPUID);
+ }
+ else
+ {
+ /*
+ * Initialize the first "sub-timer", target the DPC on a specific processor
+ * if requested to do so.
+ */
+ pTimer->aSubTimers[0].iTick = 0;
+ pTimer->aSubTimers[0].pParent = pTimer;
+
+ KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
+ if (g_pfnrtKeSetImportanceDpc)
+ g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
+ if (pTimer->fSpecificCpu)
+ rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
+ }
+ if (RT_SUCCESS(rc))
+ {
+ *ppTimer = pTimer;
+ return VINF_SUCCESS;
+ }
+
+ /* DPC targeting failed - free everything and pass the status up. */
+ RTMemFree(pTimer);
+ return rc;
+}
+
+
+/**
+ * Requests a finer system timer granularity.
+ *
+ * @returns IPRT status code.
+ * @retval VERR_NOT_SUPPORTED if ExSetTimerResolution was not resolved.
+ * @param u32Request The requested granularity in nanoseconds.
+ * @param pu32Granted Where to return the granted granularity in
+ * nanoseconds. Optional.
+ */
+RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
+{
+ if (!g_pfnrtNtExSetTimerResolution)
+ return VERR_NOT_SUPPORTED;
+
+ /* NT timer resolution is expressed in 100ns units, hence the /100 and *100. */
+ ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
+ if (pu32Granted)
+ *pu32Granted = ulGranted * 100; /* NT -> ns */
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Releases a granularity grant acquired by RTTimerRequestSystemGranularity.
+ *
+ * @returns IPRT status code.
+ * @retval VERR_NOT_SUPPORTED if ExSetTimerResolution was not resolved.
+ * @param u32Granted The granted granularity (not needed by NT, ignored).
+ */
+RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
+{
+ if (!g_pfnrtNtExSetTimerResolution)
+ return VERR_NOT_SUPPORTED;
+
+ /* Passing SetResolution=FALSE drops this caller's resolution request. */
+ g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
+ NOREF(u32Granted);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks whether high-resolution timers are supported.
+ *
+ * @returns false - this NT backend does not offer high-resolution timers.
+ */
+RTDECL(bool) RTTimerCanDoHighResolution(void)
+{
+ return false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/nt/toxic-chkstk-r0drv-nt.asm b/src/VBox/Runtime/r0drv/nt/toxic-chkstk-r0drv-nt.asm
new file mode 100644
index 00000000..0f84edf3
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/nt/toxic-chkstk-r0drv-nt.asm
@@ -0,0 +1,42 @@
+; $Id: toxic-chkstk-r0drv-nt.asm $
+;; @file
+; IPRT - Toxic _chkstk symbol.
+;
+
+;
+; Copyright (C) 2006-2019 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+; The contents of this file may alternatively be used under the terms
+; of the Common Development and Distribution License Version 1.0
+; (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+; VirtualBox OSE distribution, in which case the provisions of the
+; CDDL are applicable instead of those of the GPL.
+;
+; You may elect to license modified versions of this file under the
+; terms and conditions of either the GPL or the CDDL or both.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "iprt/asmdefs.mac"
+
+BEGINCODE
+
+;;
+; Bad function to drag into kernel code as you're eating up too much stack.
+;
+BEGINPROC _chkstk
+ ; Deliberately reference an undefined symbol with a self-explanatory name:
+ ; any object that drags in _chkstk (i.e. uses a big stack frame) will fail
+ ; to link with an error message that tells the developer what to fix.
+%define MY_SYM _chkstk_is_considered_toxic_in_kernel_code__you_should_locate_code_using_too_much_stack_and_change_it_to_use_heap
+ extern MY_SYM
+ jmp MY_SYM
+ENDPROC _chkstk
+
diff --git a/src/VBox/Runtime/r0drv/os2/Makefile.kup b/src/VBox/Runtime/r0drv/os2/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/os2/RTR0AssertPanicSystem-r0drv-os2.asm b/src/VBox/Runtime/r0drv/os2/RTR0AssertPanicSystem-r0drv-os2.asm
new file mode 100644
index 00000000..17e37052
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/RTR0AssertPanicSystem-r0drv-os2.asm
@@ -0,0 +1,104 @@
+; $Id: RTR0AssertPanicSystem-r0drv-os2.asm $
+;; @file
+; IPRT - RTR0AssertPanicSystem, Ring-0 Driver, OS/2.
+;
+
+;
+; Copyright (c) 1999-2007 knut st. osmundsen <bird-src-spam@anduin.net>
+;
+; Permission is hereby granted, free of charge, to any person
+; obtaining a copy of this software and associated documentation
+; files (the "Software"), to deal in the Software without
+; restriction, including without limitation the rights to use,
+; copy, modify, merge, publish, distribute, sublicense, and/or sell
+; copies of the Software, and to permit persons to whom the
+; Software is furnished to do so, subject to the following
+; conditions:
+;
+; The above copyright notice and this permission notice shall be
+; included in all copies or substantial portions of the Software.
+;
+; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+; OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+; FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+; OTHER DEALINGS IN THE SOFTWARE.
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_INCL_16BIT_SEGMENTS
+%include "iprt/asmdefs.mac"
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define DevHlp_InternalError 02bh
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern KernThunkStackTo16
+extern KernThunkStackTo32
+extern NAME(g_szRTAssertMsg)
+extern NAME(g_cchRTAssertMsg)
+extern NAME(g_fpfnDevHlp)
+
+
+BEGINCODE
+
+;;
+; Raises an OS/2 kernel internal processing error (IPE) carrying the last
+; assertion message (g_szRTAssertMsg/g_cchRTAssertMsg, filled in by the C code
+; in assert-r0drv-os2.cpp). Thunks to the 16-bit stack/code for the DevHlp call.
+;
+; @returns eax = 1 (should the DevHlp call unexpectedly return).
+;
+BEGINPROC_EXPORTED RTR0AssertPanicSystem
+ push ebp
+ mov ebp, esp
+ push esi
+ push edi
+ push ds
+
+ ;
+ ; Try detect debug kernel... later one day.
+ ;
+
+
+ ;
+ ; Raise an IPE.
+ ;
+ call KernThunkStackTo16
+ ; Hand-assembled far jump to the 16-bit code segment
+ ; (66h = operand-size prefix, EAh = JMP ptr16:16).
+ ;jmp far dword NAME(RTR0AssertPanicSystem_16) wrt CODE16
+ db 066h
+ db 0eah
+ dw NAME(RTR0AssertPanicSystem_16) wrt CODE16
+ dw CODE16
+BEGINCODE16
+GLOBALNAME RTR0AssertPanicSystem_16
+ ; DevHlp_InternalError takes ds:si = message, di = length, dl = function.
+ ; mov ax, seg NAME(g_szRTAssertMsg) - makes wlink crash.
+ mov ax, DATA16
+ mov ds, ax
+ mov si, NAME(g_szRTAssertMsg)
+ mov di, [NAME(g_cchRTAssertMsg)]
+ mov dl, DevHlp_InternalError
+ call far [NAME(g_fpfnDevHlp)]
+
+ ; Doesn't normally return, but in case it does...
+ ;jmp far dword NAME(RTR0AssertPanicSystem_32)
+ db 066h
+ db 0eah
+ dd NAME(RTR0AssertPanicSystem_32)
+ dw TEXT32 wrt FLAT
+ ; NOTE(review): 'BEGINCODE32:' with a colon looks like a plain label; the
+ ; sibling files use the 'BEGINCODE' macro here - confirm against asmdefs.mac.
+BEGINCODE32:
+GLOBALNAME RTR0AssertPanicSystem_32
+ call KernThunkStackTo32
+ mov eax, 1
+ pop ds
+ pop edi
+ pop esi
+ leave
+ ret
+ENDPROC RTR0AssertPanicSystem
+
diff --git a/src/VBox/Runtime/r0drv/os2/RTR0Os2DHQueryDOSVar.asm b/src/VBox/Runtime/r0drv/os2/RTR0Os2DHQueryDOSVar.asm
new file mode 100644
index 00000000..e0c94b7f
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/RTR0Os2DHQueryDOSVar.asm
@@ -0,0 +1,183 @@
+; $Id: RTR0Os2DHQueryDOSVar.asm $
+;; @file
+; IPRT - DevHelp_GetDOSVar, Ring-0 Driver, OS/2.
+;
+
+;
+; Copyright (c) 1999-2007 knut st. osmundsen <bird-src-spam@anduin.net>
+;
+; Permission is hereby granted, free of charge, to any person
+; obtaining a copy of this software and associated documentation
+; files (the "Software"), to deal in the Software without
+; restriction, including without limitation the rights to use,
+; copy, modify, merge, publish, distribute, sublicense, and/or sell
+; copies of the Software, and to permit persons to whom the
+; Software is furnished to do so, subject to the following
+; conditions:
+;
+; The above copyright notice and this permission notice shall be
+; included in all copies or substantial portions of the Software.
+;
+; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+; OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+; FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+; OTHER DEALINGS IN THE SOFTWARE.
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_INCL_16BIT_SEGMENTS
+%include "iprt/asmdefs.mac"
+%include "iprt/err.mac"
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern KernThunkStackTo32
+extern KernThunkStackTo16
+extern NAME(g_fpfnDevHlp)
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define DevHlp_GetDOSVar 24h
+
+
+BEGINCODE
+
+;
+; Jump table used by RTR0Os2DHQueryDOSVar
+;
+; Indexed by the iVar argument (0-based); each entry is the 32-bit handler
+; that converts the DevHlp_GetDOSVar result to a 16:16 far pointer in edx.
+; NOTE(review): the two reserved entries are 0 - an iVar of 0 or 3 that the
+; DevHlp call accepts would jump to address 0; confirm those values are
+; rejected earlier or unreachable.
+;
+DosVarJumpTab:
+ dd 0 ; 0 - Reserved
+ dd Load1600 ; 1 - GIS
+ dd Load1616 ; 2 - LIS
+ dd 0 ; 3 - Reserved
+ dd Load1616 ; 4 - VectorSDF
+ dd Load1616 ; 5 - VectorReboot
+ dd Load1616 ; 6 - VectorMSATS
+ dd AsIs ; 7 - YieldFlag (Resched)
+ dd AsIs ; 8 - TCYieldFlag (TCResched)
+ dd AsIs ; 9 - DOSTable
+ dd Load1616 ; a - VectorDEKKO
+ dd AsIs ; b - CodePgBuff
+ dd Load1616 ; c - VectorRIPL
+ dd AsIs ; d - InterruptLevel
+ dd AsIs ; e - DevClassTables
+ dd AsIs ; f - DMQS_Sel
+ dd AsIs ;10 - APMInfo
+ dd LoadWord ;11 - APM_Length (length of above structure)
+DosVarJumpTabEnd:
+%define DosVarJumpTabSize (DosVarJumpTabEnd - DosVarJumpTab) / 4
+
+;;
+; Unified DevHelp_GetDOSVar -> Far 16:16 pointer wrapper.
+;
+; Thunks to the 16-bit stack, issues the DevHlp call, then normalizes the
+; returned variable address to a 16:16 far pointer stored at *pfp.
+;
+; @returns eax: 0 (VINF_SUCCESS), VERR_INVALID_PARAMETER on bad iVar,
+; VERR_GENERAL_FAILURE if the DevHlp call sets carry.
+; @param iVar [ebp + 08h] Variable.
+; @param iMember [ebp + 0ch] Member.
+; @param pfp [ebp + 10h] Where to store the variable address (pointer to 16:16).
+;
+BEGINPROC_EXPORTED RTR0Os2DHQueryDOSVar
+ ; switch stack first.
+ call KernThunkStackTo16
+
+ ; normal prolog.
+ push ebp
+ mov ebp, esp
+ push dword [NAME(g_fpfnDevHlp)] ; ebp - 4
+ push ebx ; save ebx
+ push es ; save es
+
+ ; setup the devhelp call and switch to
+ mov eax, [ebp + 08h] ; iVar (8-bit)
+ mov ecx, [ebp + 0ch] ; iMember (16-bit)
+ mov dl, DevHlp_GetDOSVar
+
+ ; jump to the 16-bit code.
+ ; (hand-assembled far jump: 66h operand-size prefix + EAh JMP ptr16:16)
+ ;jmp far dword NAME(RTR0Os2DHQueryDOSVar_16) wrt CODE16
+ db 066h
+ db 0eah
+ dw NAME(RTR0Os2DHQueryDOSVar_16) wrt CODE16
+ dw CODE16
+BEGINCODE16
+GLOBALNAME RTR0Os2DHQueryDOSVar_16
+ call far [ss:ebp - 4]
+
+ ;jmp far dword NAME(RTR0Os2DHQueryDOSVar) wrt FLAT
+ db 066h
+ db 0eah
+ dd NAME(RTR0Os2DHQueryDOSVar_32) ;wrt FLAT
+ dw TEXT32 wrt FLAT
+BEGINCODE
+GLOBALNAME RTR0Os2DHQueryDOSVar_32
+ ; carry set = DevHlp failure.
+ jc Error1
+
+ ;
+ ; Make ax:ebx contain the pointer and take action according
+ ; to the variable jump table.
+ ;
+ and ebx, 0000ffffh ; clean high part of ebx
+ movzx ecx, byte [ebp + 08] ; iVar
+ ; NOTE(review): 'jg' lets iVar == DosVarJumpTabSize through even though the
+ ; table is 0-based (valid indices 0..Size-1) - looks like an off-by-one;
+ ; 'jae'/'jge' would be the expected bound check. Confirm before changing.
+ cmp ecx, DosVarJumpTabSize
+ jg Error2
+ jmp [DosVarJumpTab + ecx * 4]
+
+ ; Load Word at ax:ebx.
+LoadWord:
+ mov es, ax
+ movzx edx, word [es:ebx]
+ jmp StoreIt
+
+ ; Load selector at ax:ebx.
+Load1600:
+ mov es, ax
+ movzx edx, word [es:ebx]
+ shl edx, 16
+ jmp StoreIt
+
+ ; Load 16:16 ptr at ax:ebx.
+Load1616:
+ mov es, ax
+ mov edx, dword [es:ebx]
+ jmp StoreIt
+
+ ; Move ax:bx into edx.
+AsIs:
+ mov dx, ax
+ shl edx, 16
+ mov dx, bx
+ jmp StoreIt
+
+Error2:
+ mov eax, VERR_INVALID_PARAMETER
+ jmp Done
+
+Error1:
+ mov eax, VERR_GENERAL_FAILURE
+ jmp Done
+
+StoreIt:
+ mov ecx, [ebp + 10h]
+ mov [ecx], edx
+ xor eax, eax ; return success (VINF_SUCCESS == 0)
+
+Done:
+ pop es
+ pop ebx
+ leave
+
+ ; switch stack back and return.
+ ; (eax is preserved across the thunk by saving it on the stack.)
+ push eax
+ call KernThunkStackTo32
+ pop eax
+ ret
+ENDPROC RTR0Os2DHQueryDOSVar
+
diff --git a/src/VBox/Runtime/r0drv/os2/RTR0Os2DHVMGlobalToProcess.asm b/src/VBox/Runtime/r0drv/os2/RTR0Os2DHVMGlobalToProcess.asm
new file mode 100644
index 00000000..e1b44d42
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/RTR0Os2DHVMGlobalToProcess.asm
@@ -0,0 +1,114 @@
+; $Id: RTR0Os2DHVMGlobalToProcess.asm $
+;; @file
+; IPRT - DevHelp_VMGlobalToProcess, Ring-0 Driver, OS/2.
+;
+
+;
+; Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+;
+; Permission is hereby granted, free of charge, to any person
+; obtaining a copy of this software and associated documentation
+; files (the "Software"), to deal in the Software without
+; restriction, including without limitation the rights to use,
+; copy, modify, merge, publish, distribute, sublicense, and/or sell
+; copies of the Software, and to permit persons to whom the
+; Software is furnished to do so, subject to the following
+; conditions:
+;
+; The above copyright notice and this permission notice shall be
+; included in all copies or substantial portions of the Software.
+;
+; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+; OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+; FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+; OTHER DEALINGS IN THE SOFTWARE.
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_INCL_16BIT_SEGMENTS
+%include "iprt/asmdefs.mac"
+%include "iprt/err.mac"
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern KernThunkStackTo32
+extern KernThunkStackTo16
+extern NAME(g_fpfnDevHlp)
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define DevHlp_VMGlobalToProcess 05ah
+
+
+BEGINCODE
+
+;;
+; VMGlobalToProcess wrapper.
+;
+; Thunks to the 16-bit stack, issues DevHlp_VMGlobalToProcess and, on
+; success, stores the ring-3 mapping address at *ppR3 and returns 0.
+;
+; @returns eax: 0 on success. NOTE(review): on failure (carry set) eax is
+; returned as delivered by the DevHlp call, i.e. a raw OS/2 error
+; code rather than an IPRT status - confirm callers expect that.
+;
+; @param fFlags [ebp + 08h] Flags
+; @param pvR0 [ebp + 0ch] Ring-0 memory.
+; @param cb [ebp + 10h] Size of memory object to map.
+; @param ppR3 [ebp + 14h] Where to store the address of the ring-3 mapping.
+;
+BEGINPROC_EXPORTED RTR0Os2DHVMGlobalToProcess
+ ; switch stack first.
+ call KernThunkStackTo16
+
+ ; normal prolog.
+ push ebp
+ mov ebp, esp
+ push dword [NAME(g_fpfnDevHlp)] ; ebp - 4
+ push ebx ; save ebx
+
+ ; setup the devhelp call
+ mov eax, [ebp + 08h] ; fFlags
+ mov ebx, [ebp + 0ch] ; pvR0
+ mov ecx, [ebp + 10h] ; cb
+ mov dl, DevHlp_VMGlobalToProcess
+
+ ; jump to the 16-bit code.
+ ; (hand-assembled far jump: 66h operand-size prefix + EAh JMP ptr16:16)
+ ;jmp far dword NAME(RTR0Os2DHQueryDOSVar_16) wrt CODE16
+ db 066h
+ db 0eah
+ dw NAME(RTR0Os2DHVMGlobalToProcess_16) wrt CODE16
+ dw CODE16
+BEGINCODE16
+GLOBALNAME RTR0Os2DHVMGlobalToProcess_16
+ call far [ss:ebp - 4]
+
+ ;jmp far dword NAME(RTR0Os2DHVMGlobalToProcess_32) wrt FLAT
+ db 066h
+ db 0eah
+ dd NAME(RTR0Os2DHVMGlobalToProcess_32) ;wrt FLAT
+ dw TEXT32 wrt FLAT
+BEGINCODE
+GLOBALNAME RTR0Os2DHVMGlobalToProcess_32
+ ; carry set = DevHlp failure; skip storing the result.
+ jc .done
+
+ ; save the result.
+ mov edx, [ebp + 14h] ; ppvR3
+ mov [edx], eax
+ xor eax, eax
+
+.done:
+ pop ebx
+ leave
+
+ ; switch stack back and return.
+ push eax
+ call KernThunkStackTo32
+ pop eax
+ ret
+ENDPROC RTR0Os2DHVMGlobalToProcess
+
diff --git a/src/VBox/Runtime/r0drv/os2/alloc-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/alloc-r0drv-os2.cpp
new file mode 100644
index 00000000..c448a987
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/alloc-r0drv-os2.cpp
@@ -0,0 +1,107 @@
+/* $Id: alloc-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/param.h>
+#include "r0drv/alloc-r0drv.h" /** @todo drop the r0drv/alloc-r0drv.cpp stuff on OS/2? */
+
+
+/**
+ * OS/2 backend for the common ring-0 allocator (r0drv/alloc-r0drv.cpp).
+ *
+ * @returns IPRT status code.
+ * @retval VERR_NOT_SUPPORTED for any-context allocations (not possible here).
+ * @param cb The number of payload bytes requested.
+ * @param fFlags RTMEMHDR_FLAG_XXX.
+ * @param ppHdr Where to return the allocation header; the payload
+ * follows it immediately.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+ if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
+ return VERR_NOT_SUPPORTED;
+
+ /* NOTE(review): no overflow check on cb + sizeof(RTMEMHDR); presumably the
+ common layer caps cb before calling - confirm. The (void **)-1 physical
+ address argument appears to mean "don't care" - verify against the OS/2
+ KernVMAlloc documentation. */
+ void *pv = NULL;
+ APIRET rc = KernVMAlloc(cb + sizeof(RTMEMHDR), VMDHA_FIXED, &pv, (void **)-1, NULL);
+ if (RT_UNLIKELY(rc != NO_ERROR))
+ return RTErrConvertFromOS2(rc);
+
+ /* The header lives at the start of the block; payload follows it. */
+ PRTMEMHDR pHdr = (PRTMEMHDR)pv;
+ pHdr->u32Magic = RTMEMHDR_MAGIC;
+ pHdr->fFlags = fFlags;
+ pHdr->cb = cb;
+ pHdr->cbReq = cb;
+ *ppHdr = pHdr;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees an allocation made by rtR0MemAllocEx.
+ *
+ * @param pHdr The allocation header (start of the KernVMAlloc block).
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+ /* Invalidate the magic before freeing to catch double-free/use-after-free. */
+ pHdr->u32Magic += 1;
+ APIRET rc = KernVMFree(pHdr);
+ Assert(!rc); NOREF(rc);
+}
+
+
+/**
+ * Allocates physically contiguous, fixed memory (OS/2).
+ *
+ * @returns Pointer to the page-aligned allocation, NULL on failure.
+ * @param pPhys Where to return the physical address of the block.
+ * @param cb Number of bytes to allocate; rounded up to a page multiple.
+ */
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+ /*
+ * Validate input.
+ */
+ AssertPtr(pPhys);
+ Assert(cb > 0);
+
+ /*
+ * All physical memory in OS/2 is below 4GB, so this should be kind of easy.
+ */
+ cb = RT_ALIGN_Z(cb, PAGE_SIZE); /* -> page aligned result. */
+ PVOID pv = NULL;
+ PVOID PhysAddr = (PVOID)~0UL; /* sentinel so we can assert it was filled in */
+ APIRET rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pv, &PhysAddr, NULL);
+ if (!rc)
+ {
+ Assert(PhysAddr != (PVOID)~0UL);
+ Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
+ *pPhys = (uintptr_t)PhysAddr;
+ return pv;
+ }
+ /* Any KernVMAlloc failure is reported as NULL; the OS/2 rc is discarded. */
+ return NULL;
+}
+
+
+/**
+ * Frees memory allocated by RTMemContAlloc.
+ *
+ * @param pv The allocation to free. NULL is ignored.
+ * @param cb The size of the allocation (unused; KernVMFree tracks it).
+ */
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+ if (pv)
+ {
+ AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
+ KernVMFree(pv);
+ }
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/assert-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/assert-r0drv-os2.cpp
new file mode 100644
index 00000000..7c08db34
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/assert-r0drv-os2.cpp
@@ -0,0 +1,134 @@
+/* $Id: assert-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/string.h>
+#include <iprt/stdarg.h>
+
+#include <VBox/log.h>
+
+#include "internal/assert.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN /* for watcom */
+/** The last assert message. (in DATA16) */
+extern char g_szRTAssertMsg[2048];
+/** The length of the last assert message. (in DATA16) */
+extern size_t g_cchRTAssertMsg;
+RT_C_DECLS_END
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static DECLCALLBACK(size_t) rtR0Os2AssertOutputCB(void *pvArg, const char *pachChars, size_t cbChars);
+
+
+/**
+ * First-part assertion message worker: formats the standard assertion header
+ * into the 16-bit addressable g_szRTAssertMsg buffer (consumed by the
+ * RTR0AssertPanicSystem assembly stub) and records its length.
+ *
+ * @param pszExpr The failed expression.
+ * @param uLine The source line number.
+ * @param pszFile The source file name.
+ * @param pszFunction The function name.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+#if defined(DEBUG_bird)
+ /* Developer-only: mirror the message to the COM port log. */
+ RTLogComPrintf("\n!!Assertion Failed!!\n"
+ "Expression: %s\n"
+ "Location : %s(%d) %s\n",
+ pszExpr, pszFile, uLine, pszFunction);
+#endif
+
+ /* CRLF line endings: the buffer is shown via OS/2 facilities. */
+ g_cchRTAssertMsg = RTStrPrintf(g_szRTAssertMsg, sizeof(g_szRTAssertMsg),
+ "\r\n!!Assertion Failed!!\r\n"
+ "Expression: %s\r\n"
+ "Location : %s(%d) %s\r\n",
+ pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+/**
+ * Second-part assertion message worker: appends the custom message to
+ * g_szRTAssertMsg (after the header written by rtR0AssertNativeMsg1),
+ * converting line endings via rtR0Os2AssertOutputCB.
+ *
+ * @param fInitial Whether this is the first message for this assertion
+ * (unused here).
+ * @param pszFormat The format string.
+ * @param va Format arguments.
+ */
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+#if defined(DEBUG_bird)
+ /* Developer-only: mirror to the COM port log; copy va since it is reused below. */
+ va_list vaCopy;
+ va_copy(vaCopy, va);
+ RTLogComPrintfV(pszFormat, vaCopy);
+ va_end(vaCopy);
+#endif
+
+ /* Append at the current end of the buffer; the callback advances pch. */
+ size_t cch = g_cchRTAssertMsg;
+ char *pch = &g_szRTAssertMsg[cch];
+ cch += RTStrFormatV(rtR0Os2AssertOutputCB, &pch, NULL, NULL, pszFormat, va);
+ g_cchRTAssertMsg = cch;
+
+ NOREF(fInitial);
+}
+
+
+/**
+ * Output callback.
+ *
+ * Appends characters to g_szRTAssertMsg, normalizing line endings to CRLF:
+ * incoming '\r' characters are dropped and a '\r' is inserted before each
+ * '\n'. Output is silently truncated when the buffer is full. The buffer is
+ * kept zero-terminated; the terminator is not counted in the return value.
+ *
+ * @returns number of bytes written.
+ * @param pvArg Pointer to a char pointer with the current output position.
+ * @param pachChars Pointer to an array of utf-8 characters.
+ * @param cbChars Number of bytes in the character array pointed to by pachChars.
+ */
+static DECLCALLBACK(size_t) rtR0Os2AssertOutputCB(void *pvArg, const char *pachChars, size_t cbChars)
+{
+ char **ppch = (char **)pvArg;
+ char *pch = *ppch;
+
+ while (cbChars-- > 0)
+ {
+ const char ch = *pachChars++;
+ if (ch == '\r')
+ continue;
+ if (ch == '\n')
+ {
+ /* Need room for both the inserted '\r' and the terminator. */
+ if (pch + 1 >= &g_szRTAssertMsg[sizeof(g_szRTAssertMsg)])
+ break;
+ *pch++ = '\r';
+ }
+ if (pch + 1 >= &g_szRTAssertMsg[sizeof(g_szRTAssertMsg)])
+ break;
+ *pch++ = ch;
+ }
+ *pch = '\0';
+
+ size_t cbWritten = pch - *ppch;
+ *ppch = pch;
+ return cbWritten;
+}
+
+
+/* RTR0AssertPanicSystem is implemented in RTR0AssertPanicSystem-r0drv-os2.asm */
+
diff --git a/src/VBox/Runtime/r0drv/os2/assertA-r0drv-os2.asm b/src/VBox/Runtime/r0drv/os2/assertA-r0drv-os2.asm
new file mode 100644
index 00000000..0e0808aa
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/assertA-r0drv-os2.asm
@@ -0,0 +1,47 @@
+; $Id: assertA-r0drv-os2.asm $
+;; @file
+; IPRT - Assertion Message Buffers (DATA16), Ring-0 Driver, OS/2.
+;
+
+;
+; Copyright (c) 1999-2007 knut st. osmundsen <bird-src-spam@anduin.net>
+;
+; Permission is hereby granted, free of charge, to any person
+; obtaining a copy of this software and associated documentation
+; files (the "Software"), to deal in the Software without
+; restriction, including without limitation the rights to use,
+; copy, modify, merge, publish, distribute, sublicense, and/or sell
+; copies of the Software, and to permit persons to whom the
+; Software is furnished to do so, subject to the following
+; conditions:
+;
+; The above copyright notice and this permission notice shall be
+; included in all copies or substantial portions of the Software.
+;
+; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+; OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+; FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+; OTHER DEALINGS IN THE SOFTWARE.
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_INCL_16BIT_SEGMENTS
+%include "iprt/asmdefs.mac"
+
+BEGINDATA16
+
+;; The last assert message. (see assert-r0drv-os2.cpp)
+EXPORTEDNAME g_szRTAssertMsg
+times 2048 db 0
+
+;; The length of the last assert message.
+EXPORTEDNAME g_cchRTAssertMsg
+ dd 0
+
diff --git a/src/VBox/Runtime/r0drv/os2/initterm-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/initterm-r0drv-os2.cpp
new file mode 100644
index 00000000..d77fbd68
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/initterm-r0drv-os2.cpp
@@ -0,0 +1,98 @@
+/* $Id: initterm-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Initialization & Termination, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include "internal/initterm.h"
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Pointer to the 1st DOS variable table. */
+PCDOSTABLE g_pDosTable = NULL;
+/** Pointer to the 2nd DOS variable table. */
+PCDOSTABLE2 g_pDosTable2 = NULL;
+/** Pointer to the global info segment. */
+PGINFOSEG g_pGIS = NULL;
+/** Far 16:16 pointer to the local info segment.
+ * IIRC this must be converted to a flat pointer when accessed to work correctly on SMP systems. */
+RTFAR16 g_fpLIS = {0, 0};
+
+
+/**
+ * Native (OS/2) initialization of the IPRT ring-0 runtime.
+ *
+ * Resolves the DOS variable tables and the global/local info segment
+ * addresses into the g_pDosTable, g_pDosTable2, g_pGIS and g_fpLIS globals.
+ *
+ * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR if any
+ *          DevHelp_GetDOSVar request fails or returns a NULL table.
+ */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+ /*
+ * Get the DOS Tables.
+ */
+ RTFAR16 fp;
+ int rc = RTR0Os2DHQueryDOSVar(DHGETDOSV_DOSTABLES, 0, &fp);
+ AssertMsgReturn(!rc, ("rc=%d\n", rc), VERR_INTERNAL_ERROR);
+ g_pDosTable = (PCDOSTABLE)RTR0Os2Virt2Flat(fp);
+ AssertReturn(g_pDosTable, VERR_INTERNAL_ERROR);
+ /* Table 2 follows table 1; 'cul' is the ULONG entry count of table 1.
+    NOTE(review): the '+ 1' presumably skips a trailing count byte - confirm against the DOSTABLE layout. */
+ g_pDosTable2 = (PCDOSTABLE2)((const uint8_t *)g_pDosTable + g_pDosTable->cul * sizeof(ULONG) + 1);
+
+ /*
+ * Get the addresses of the two info segments.
+ */
+ rc = RTR0Os2DHQueryDOSVar(DHGETDOSV_SYSINFOSEG, 0, &fp);
+ AssertMsgReturn(!rc, ("rc=%d\n", rc), VERR_INTERNAL_ERROR);
+ g_pGIS = (PGINFOSEG)RTR0Os2Virt2Flat(fp);
+ /* The LIS is kept as a far 16:16 pointer; see the note on g_fpLIS above. */
+ rc = RTR0Os2DHQueryDOSVar(DHGETDOSV_LOCINFOSEG, 0, &fp);
+ AssertMsgReturn(!rc, ("rc=%d\n", rc), VERR_INTERNAL_ERROR);
+ g_fpLIS = fp;
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Native (OS/2) termination counterpart to rtR0InitNative.
+ * The cached table/segment pointers need no cleanup.
+ */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+ /* nothing to do here yet. */
+}
+
+
+/**
+ * Converts a 16:16 pointer to a flat pointer.
+ *
+ * @returns Flat pointer (NULL if fp is NULL).
+ * @param fp Far pointer to convert.
+ */
+RTR0DECL(void *) RTR0Os2Virt2Flat(RTFAR16 fp)
+{
+ /* KEE's KernSelToFlat takes the 16:16 address packed into a single 32-bit value. */
+ return (void *)KernSelToFlat(((uint32_t)fp.sel << 16) | fp.off);
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
new file mode 100644
index 00000000..4fb0abd8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
@@ -0,0 +1,572 @@
+/* $Id: memobj-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/memobj.h>
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/assert.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The OS/2 version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJOS2
+{
+ /** The core structure. */
+ RTR0MEMOBJINTERNAL Core;
+ /** Lock for the ring-3 / ring-0 pinned objects.
+ * This member might not be allocated for some object types. */
+ KernVMLock_t Lock;
+ /** Array of physical pages.
+ * This array can be 0 in length for some object types. */
+ KernPageList_t aPages[1];
+} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);
+
+
+/**
+ * Frees the OS/2 specific resources of a memory object.
+ *
+ * @returns IPRT status code.
+ * @param pMem The object to clean up; the object itself is deleted
+ * by the generic caller afterwards.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
+ int rc;
+
+ switch (pMemOs2->Core.enmType)
+ {
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ /* Never created by this backend: AllocPhysNC defers to AllocPhys (type PHYS). */
+ AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
+ return VERR_INTERNAL_ERROR;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ /* Only has something to free if a ring-0 mapping was created lazily
+    (see rtR0MemObjNativeMapKernel storing pv for PHYS objects). */
+ if (!pMemOs2->Core.pv)
+ break;
+ /* NOTE(review): intentional but unannotated fall thru into the case below - confirm. */
+
+ case RTR0MEMOBJTYPE_MAPPING:
+ /* Ring-0 mappings are mere aliases of the parent object; nothing to free. */
+ if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
+ break;
+
+ RT_FALL_THRU();
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_CONT:
+ rc = KernVMFree(pMemOs2->Core.pv);
+ AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
+ break;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ /* Undo the KernVMLock done by LockUser/LockKernel. */
+ rc = KernVMUnlock(&pMemOs2->Lock);
+ AssertMsg(!rc, ("rc=%d\n", rc));
+ break;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
+ return VERR_INTERNAL_ERROR;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates page-aligned kernel memory (no physical placement constraints).
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the new memory object on success.
+ * @param cb Number of bytes; assumed page aligned by the generic code.
+ * @param fExecutable Ignored - OS/2 kernel mappings are always executable.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+
+ /* create the object. */
+ const ULONG cPages = cb >> PAGE_SHIFT;
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
+ RTR0MEMOBJTYPE_PAGE, NULL, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* do the allocation. */
+ int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
+ if (!rc)
+ {
+ /* Query the physical pages backing the allocation and expand the
+    run-length list into one entry per page for direct indexing. */
+ ULONG cPagesRet = cPages;
+ rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
+ if (!rc)
+ {
+ rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ KernVMFree(pMemOs2->Core.pv);
+ }
+ rtR0MemObjDelete(&pMemOs2->Core);
+ return RTErrConvertFromOS2(rc);
+}
+
+
+/**
+ * Allocates kernel memory below 4GB.
+ *
+ * Identical to rtR0MemObjNativeAllocPage except for the object type and the
+ * VERR_NO_LOW_MEMORY status mapping; OS/2 is 32-bit so all physical memory
+ * returned by KernVMAlloc is below 4GB anyway.
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+
+ /* create the object. */
+ const ULONG cPages = cb >> PAGE_SHIFT;
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
+ RTR0MEMOBJTYPE_LOW, NULL, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* do the allocation. */
+ int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
+ if (!rc)
+ {
+ ULONG cPagesRet = cPages;
+ rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
+ if (!rc)
+ {
+ rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ KernVMFree(pMemOs2->Core.pv);
+ }
+ rtR0MemObjDelete(&pMemOs2->Core);
+ /* Map generic out-of-memory onto the low-memory specific status. */
+ rc = RTErrConvertFromOS2(rc);
+ return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
+}
+
+
+/**
+ * Allocates physically contiguous kernel memory.
+ *
+ * No page array is needed since the physical address of every page can be
+ * derived from u.Cont.Phys, so only the header up to 'Lock' is allocated.
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+
+ /* create the object. */
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* do the allocation. */
+ ULONG ulPhys = ~0UL;
+ int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
+ if (!rc)
+ {
+ /* KernVMAlloc returns the base physical address via the 3rd pointer when VMDHA_CONTIG is set. */
+ Assert(ulPhys != ~0UL);
+ pMemOs2->Core.u.Cont.Phys = ulPhys;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ rtR0MemObjDelete(&pMemOs2->Core);
+ return RTErrConvertFromOS2(rc);
+}
+
+
+/**
+ * Allocates contiguous physical memory below a given address limit.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the new memory object.
+ * @param cb Number of bytes to allocate.
+ * @param PhysHighest Highest acceptable physical address (inclusive);
+ * limits below 16MB are rejected.
+ * @param uAlignment Requested alignment; only PAGE_SIZE is supported.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+ AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
+
+ /** @todo alignment */
+ if (uAlignment != PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ /* create the object. */
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* do the allocation.
+    NOTE(review): VMDHA_16M is applied for any PhysHighest below 4GB, i.e. the
+    only placement KernVMAlloc can honour is "below 16MB" - confirm intent. */
+ ULONG ulPhys = ~0UL;
+ int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
+ if (!rc)
+ {
+ Assert(ulPhys != ~0UL);
+ pMemOs2->Core.u.Phys.fAllocated = true;
+ pMemOs2->Core.u.Phys.PhysBase = ulPhys;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ rtR0MemObjDelete(&pMemOs2->Core);
+ return RTErrConvertFromOS2(rc);
+}
+
+
+/**
+ * Allocates non-contiguous physical memory below a given address limit.
+ *
+ * No true non-contiguous allocator exists here; this defers to the
+ * contiguous allocator with page alignment (the result is therefore a
+ * PHYS object, which rtR0MemObjNativeFree relies on).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+ /** @todo Implement a real non-contiguous allocation for OS/2. */
+ return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
+}
+
+
+/**
+ * Creates a memory object for a pre-existing physical address range.
+ *
+ * Nothing is allocated or mapped here; a kernel mapping is created lazily
+ * by rtR0MemObjNativeMapKernel when first needed.
+ *
+ * @returns IPRT status code.
+ * @param uCachePolicy Must be RTMEM_CACHE_POLICY_DONT_CARE.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+ AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
+
+ /* create the object. */
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* there is no allocation here, right? it needs to be mapped somewhere first. */
+ pMemOs2->Core.u.Phys.fAllocated = false;
+ pMemOs2->Core.u.Phys.PhysBase = Phys;
+ pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Locks (pins) a range of user memory in the current process.
+ *
+ * Only the calling process is supported (KernVMLock operates on the
+ * current context).
+ *
+ * @returns IPRT status code.
+ * @param fAccess RTMEM_PROT_READ/WRITE; write intent selects VMDHL_WRITE.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
+ RTR0PROCESS R0Process)
+{
+ AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+
+ /* create the object. */
+ /* NOTE(review): assumes R3Ptr/cb are page aligned so cb >> PAGE_SHIFT covers the range - confirm caller guarantees. */
+ const ULONG cPages = cb >> PAGE_SHIFT;
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
+ RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* lock it. */
+ ULONG cPagesRet = cPages;
+ int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
+ (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
+ if (!rc)
+ {
+ rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+ Assert(cb == pMemOs2->Core.cb);
+ Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
+ pMemOs2->Core.u.Lock.R0Process = R0Process;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ rtR0MemObjDelete(&pMemOs2->Core);
+ return RTErrConvertFromOS2(rc);
+}
+
+
+/**
+ * Locks (pins) a range of kernel memory.
+ *
+ * Same as rtR0MemObjNativeLockUser but for ring-0 addresses; the owner is
+ * recorded as NIL_RTR0PROCESS.
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+ /* create the object. */
+ const ULONG cPages = cb >> PAGE_SHIFT;
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
+ RTR0MEMOBJTYPE_LOCK, pv, cb);
+ if (!pMemOs2)
+ return VERR_NO_MEMORY;
+
+ /* lock it. */
+ ULONG cPagesRet = cPages;
+ int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
+ pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
+ if (!rc)
+ {
+ rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+ pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ rtR0MemObjDelete(&pMemOs2->Core);
+ return RTErrConvertFromOS2(rc);
+}
+
+
+/**
+ * Reserving kernel address space is not implemented on OS/2.
+ * @returns VERR_NOT_SUPPORTED.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+ RT_NOREF(ppMem, pvFixed, cb, uAlignment);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Reserving user address space is not implemented on OS/2.
+ * @returns VERR_NOT_SUPPORTED.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
+ RTR0PROCESS R0Process)
+{
+ RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Maps a memory object into kernel space.
+ *
+ * OS/2 kernel allocations are globally visible, so for most object types
+ * this merely wraps the existing ring-0 address in a dummy MAPPING object.
+ * For PHYS objects without a ring-0 address, one is created on demand via
+ * KernVMAlloc(VMDHA_PHYS) and cached in the parent object (freed by
+ * rtR0MemObjNativeFree).
+ *
+ * @returns IPRT status code.
+ * @param pvFixed Must be (void *)-1 (no fixed-address support).
+ * @param offSub,cbSub Must be zero (no sub-range support).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+ unsigned fProt, size_t offSub, size_t cbSub)
+{
+ AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
+ AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+
+ /*
+ * Check that the specified alignment is supported.
+ */
+ if (uAlignment > PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+
+/** @todo finish the implementation. */
+
+ int rc;
+ void *pvR0 = NULL;
+ PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
+ switch (pMemToMapOs2->Core.enmType)
+ {
+ /*
+ * These have kernel mappings already.
+ */
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_CONT:
+ pvR0 = pMemToMapOs2->Core.pv;
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ pvR0 = pMemToMapOs2->Core.pv;
+ if (!pvR0)
+ {
+ /* no ring-0 mapping, so allocate a mapping in the process. */
+ AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
+ Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
+ /* KernVMAlloc takes a 32-bit physical address; guard against truncation. */
+ ULONG ulPhys = (ULONG)pMemToMapOs2->Core.u.Phys.PhysBase;
+ AssertReturn(ulPhys == pMemToMapOs2->Core.u.Phys.PhysBase, VERR_OUT_OF_RANGE);
+ rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
+ if (rc)
+ return RTErrConvertFromOS2(rc);
+ /* Cache it so rtR0MemObjNativeFree can release it with the parent. */
+ pMemToMapOs2->Core.pv = pvR0;
+ }
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ /* Never created by this backend. */
+ AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
+ return VERR_INTERNAL_ERROR_3;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+ return VERR_NOT_SUPPORTED; /** @todo implement this... */
+ pvR0 = pMemToMapOs2->Core.pv;
+ break;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ case RTR0MEMOBJTYPE_MAPPING:
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
+ return VERR_INTERNAL_ERROR;
+ }
+
+ /*
+ * Create a dummy mapping object for it.
+ *
+ * All mappings are read/write/execute in OS/2 and there isn't
+ * any cache options, so sharing is ok. And the main memory object
+ * isn't actually freed until all the mappings have been freed up
+ * (reference counting).
+ */
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
+ pvR0, pMemToMapOs2->Core.cb);
+ if (pMemOs2)
+ {
+ pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Maps a memory object into the current process.
+ *
+ * Resolves the object's ring-0 address and exposes it to the current
+ * process via RTR0Os2DHVMGlobalToProcess. PHYS objects without an existing
+ * ring-0 mapping are rejected (see the disabled #if 0 attempt below).
+ *
+ * @returns IPRT status code.
+ * @param R3PtrFixed Must be (RTR3PTR)-1 (no fixed-address support).
+ * @param R0Process Must be the calling process.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
+{
+ AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+ AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
+ if (uAlignment > PAGE_SIZE)
+ return VERR_NOT_SUPPORTED;
+
+ int rc;
+ void *pvR0;
+ void *pvR3 = NULL;
+ PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
+ switch (pMemToMapOs2->Core.enmType)
+ {
+ /*
+ * These have kernel mappings already.
+ */
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_CONT:
+ pvR0 = pMemToMapOs2->Core.pv;
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ pvR0 = pMemToMapOs2->Core.pv;
+ /* The block below is disabled dead code (self-described as wrong);
+    PHYS objects therefore always take the VERR_NOT_SUPPORTED exit. */
+#if 0/* this is wrong. */
+ if (!pvR0)
+ {
+ /* no ring-0 mapping, so allocate a mapping in the process. */
+ AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
+ Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
+ ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
+ rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
+ if (rc)
+ return RTErrConvertFromOS2(rc);
+ }
+ break;
+#endif
+ return VERR_NOT_SUPPORTED;
+
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ /* Never created by this backend. */
+ AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
+ return VERR_INTERNAL_ERROR_5;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+ return VERR_NOT_SUPPORTED; /** @todo implement this... */
+ pvR0 = pMemToMapOs2->Core.pv;
+ break;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ case RTR0MEMOBJTYPE_MAPPING:
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
+ return VERR_INTERNAL_ERROR;
+ }
+
+ /*
+ * Map the ring-0 memory into the current process.
+ */
+ if (!pvR3)
+ {
+ Assert(pvR0);
+ ULONG flFlags = 0;
+ /* NOTE(review): VMDHGP_4MB is set for the default PAGE_SIZE alignment - confirm this flag's intent. */
+ if (uAlignment == PAGE_SIZE)
+ flFlags |= VMDHGP_4MB;
+ if (fProt & RTMEM_PROT_WRITE)
+ flFlags |= VMDHGP_WRITE;
+ rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
+ if (rc)
+ return RTErrConvertFromOS2(rc);
+ }
+ Assert(pvR3);
+
+ /*
+ * Create a mapping object for it.
+ */
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
+ pvR3, pMemToMapOs2->Core.cb);
+ if (pMemOs2)
+ {
+ Assert(pMemOs2->Core.pv == pvR3);
+ pMemOs2->Core.u.Mapping.R0Process = R0Process;
+ *ppMem = &pMemOs2->Core;
+ return VINF_SUCCESS;
+ }
+ KernVMFree(pvR3);
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * Changing page protection is not implemented on OS/2.
+ * @returns VERR_NOT_SUPPORTED.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+ NOREF(pMem);
+ NOREF(offSub);
+ NOREF(cbSub);
+ NOREF(fProt);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Gets the physical address of a page within a memory object.
+ *
+ * @returns The page's host physical address, NIL_RTHCPHYS for object types
+ * without physical backing information.
+ * @param pMem The memory object.
+ * @param iPage Zero-based page index; assumed in range by the caller.
+ */
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+ PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
+
+ switch (pMemOs2->Core.enmType)
+ {
+ /* These carry a per-page list (expanded by rtR0MemObjFixPageList). */
+ case RTR0MEMOBJTYPE_PAGE:
+ case RTR0MEMOBJTYPE_LOW:
+ case RTR0MEMOBJTYPE_LOCK:
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ return pMemOs2->aPages[iPage].Addr;
+
+ /* These are contiguous, so the address is a simple offset from the base. */
+ case RTR0MEMOBJTYPE_CONT:
+ return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
+
+ case RTR0MEMOBJTYPE_PHYS:
+ return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ case RTR0MEMOBJTYPE_MAPPING:
+ default:
+ return NIL_RTHCPHYS;
+ }
+}
+
+
+/**
+ * Expands the page list so we can index pages directly.
+ *
+ * KernLinToPageList returns run-length entries (one entry may describe
+ * several contiguous pages); this expands them in place, back to front,
+ * into exactly one PAGE_SIZE entry per page so aPages[iPage] works.
+ *
+ * @param paPages The page list array to fix; must have room for cPages
+ * entries.
+ * @param cPages The number of pages that's supposed to go into the list.
+ * @param cPagesRet The actual number of entries returned by the kernel.
+ */
+static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
+{
+ Assert(cPages >= cPagesRet);
+ if (cPages != cPagesRet)
+ {
+ /* Walk input entries from the last one, writing expanded pages from
+    the end of the array so nothing unread is overwritten. */
+ ULONG iIn = cPagesRet;
+ ULONG iOut = cPages;
+ do
+ {
+ iIn--;
+ iOut--;
+ Assert(iIn <= iOut);
+
+ KernPageList_t Page = paPages[iIn];
+ Assert(!(Page.Addr & PAGE_OFFSET_MASK));
+ Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));
+
+ /* Emit the trailing pages of a multi-page run, highest address first. */
+ if (Page.Size > PAGE_SIZE)
+ {
+ do
+ {
+ Page.Size -= PAGE_SIZE;
+ paPages[iOut].Addr = Page.Addr + Page.Size;
+ paPages[iOut].Size = PAGE_SIZE;
+ iOut--;
+ } while (Page.Size > PAGE_SIZE);
+ }
+
+ /* First page of the run lands at the current output slot. */
+ paPages[iOut].Addr = Page.Addr;
+ paPages[iOut].Size = PAGE_SIZE;
+ } while ( iIn != iOut
+ && iIn > 0);
+ }
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/memuserkernel-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/memuserkernel-r0drv-os2.cpp
new file mode 100644
index 00000000..c1251db6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/memuserkernel-r0drv-os2.cpp
@@ -0,0 +1,91 @@
+/* $Id: memuserkernel-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/mem.h>
+#include <iprt/errcore.h>
+
+
+/**
+ * Copies memory from user space into a ring-0 buffer via KernCopyIn.
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED on any KernCopyIn failure.
+ */
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+ int rc = KernCopyIn(pvDst, (void *)R3PtrSrc, cb);
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
+/**
+ * Copies a ring-0 buffer out to user space via KernCopyOut.
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED on any KernCopyOut failure.
+ */
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+ int rc = KernCopyOut((void *)R3PtrDst, (void *)pvSrc, cb);
+ if (RT_LIKELY(rc == 0))
+ return VINF_SUCCESS;
+ return VERR_ACCESS_DENIED;
+}
+
+
+/**
+ * Heuristic check whether an address could be a valid user-mode address.
+ * @returns true if below the assumed 3GB user/kernel split (see the todo).
+ */
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
+ /** @todo this is all wrong, but I'm too lazy to figure out how to make it
+ * correct. Checking the user DS limit would work if it wasn't maxed
+ * out by SDD, VPC or someone. The version (+SMP) would help on older
+ * OS/2 versions where the limit is 512MB. */
+ return R3Ptr < UINT32_C(0xc0000000); /* 3GB */
+}
+
+
+/**
+ * Heuristic check whether an address could be a valid kernel address.
+ * @returns true if at or above the assumed 512MB floor (see the todo).
+ */
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+ /** @todo this is all wrong, see RTR0MemUserIsValidAddr. */
+ return (uintptr_t)pv >= UINT32_C(0x20000000); /* 512MB */
+}
+
+
+/**
+ * Whether kernel and user addresses live in disjoint ranges.
+ * @returns false - OS/2 kernel and user share one flat address space here.
+ */
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+ /** @todo this is all wrong, see RTR0MemUserIsValidAddr. */
+ return false;
+}
+
+
+/**
+ * Fault-safe kernel-to-kernel copy; not implemented on OS/2.
+ * @returns VERR_NOT_SUPPORTED.
+ */
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+ RT_NOREF(pvDst, pvSrc, cb);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Fault-safe kernel-to-kernel copy; not implemented on OS/2.
+ * @returns VERR_NOT_SUPPORTED.
+ */
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+ RT_NOREF(pvDst, pvSrc, cb);
+ return VERR_NOT_SUPPORTED;
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/os2imports.imp b/src/VBox/Runtime/r0drv/os2/os2imports.imp
new file mode 100644
index 00000000..26feb406
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/os2imports.imp
@@ -0,0 +1,121 @@
+;
+; DOSCALLS
+;
+DOS16OPEN DOSCALLS 70 ?
+DOS16CLOSE DOSCALLS 59 ?
+DOS16READ DOSCALLS 137 ?
+DOS16WRITE DOSCALL1 138 ?
+DOS16IREAD DOSCALL1 79 ?
+DOS16IWRITE DOSCALL1 87 ?
+DOS16DEVIOCTL DOSCALL1 53 ?
+DOS16DEVIOCTL2 DOSCALL1 99 ?
+DOS16FSATTACH DOSCALL1 181 ?
+DOS16FSCTL DOSCALL1 183 ?
+
+;
+; KEE
+;
+KernAllocSpinLock KEE 10 ?
+KernFreeSpinLock KEE 11 ?
+KernAcquireSpinLock KEE 12 ?
+KernReleaseSpinLock KEE 13 ?
+KernAllocMutexLock KEE 20 ?
+KernFreeMutexLock KEE 21 ?
+KernRequestSharedMutex KEE 22 ?
+KernReleaseSharedMutex KEE 23 ?
+KernTryRequestSharedMutex KEE 24 ?
+KernRequestExclusiveMutex KEE 25 ?
+KernReleaseExclusiveMutex KEE 26 ?
+KernTryRequestExclusiveMutex KEE 27 ?
+KernBlock KEE 30 ?
+KernWakeup KEE 31 ?
+KernThunkStackTo16 KEE 40 ?
+KernThunkStackTo32 KEE 41 ?
+KernSerialize16BitDD KEE 42 ?
+KernUnserialize16BitDD KEE 43 ?
+KernArmHook KEE 44 ?
+KernAllocateContextHook KEE 45 ?
+KernCopyIn KEE 50 ?
+KernCopyOut KEE 51 ?
+KernVMAlloc KEE 60 ?
+KernVMFree KEE 61 ?
+KernVMLock KEE 62 ?
+KernVMUnlock KEE 63 ?
+KernLinToPageList KEE 64 ?
+KernVMSetMem KEE 65 ?
+KernSelToFlat KEE 66 ?
+KernDynamicAPI KEE 70 ?
+KernRASSysTrace KEE 80 ?
+KernPerfSysTrace KEE 81 ?
+_KernSISData KEE 90 ?
+_KernLISData KEE 91 ?
+_KernInterruptLevel KEE 92 ?
+_KernTKSSBase KEE 93 ?
+_KernKEEVersion KEE 94 ?
+KernLockFile KEE 100 ?
+KernUnLockFile KEE 101 ?
+KernGetFileSize KEE 102 ?
+KernTestFileCache KEE 103 ?
+KernReadFileAt KEE 104 ?
+KernReadFileAtCache KEE 105 ?
+KernReturnFileCache KEE 106 ?
+KernCreateUconvObject KEE 120 ?
+KernStrFromUcs KEE 121 ?
+KernStrToUcs KEE 122 ?
+
+FSH_SEGALLOC FSHELPER 1 ?
+FSH_SEGFREE FSHELPER 2 ?
+FSH_SEGREALLOC FSHELPER 3 ?
+FSH_FORCENOSWAP FSHELPER 4 ?
+FSH_INTERR FSHELPER 5 ?
+FSH_SEMREQUEST FSHELPER 6 ?
+FSH_SEMCLEAR FSHELPER 7 ?
+FSH_PROBEBUF FSHELPER 8 ?
+FSH_GETPRIORITY FSHELPER 9 ?
+FSH_IOSEMCLEAR FSHELPER 10 ?
+FSH_FLUSHBUF FSHELPER 11 ?
+FSH_CRITERROR FSHELPER 12 ?
+FSH_DEVIOCTL FSHELPER 13 ?
+FSH_GETVOLPARM FSHELPER 14 ?
+FSH_FINDDUPHVPB FSHELPER 15 ?
+FSH_DOVOLIO FSHELPER 16 ?
+FSH_ADDSHARE FSHELPER 17 ?
+FSH_REMOVESHARE FSHELPER 18 ?
+FSH_GETOVERLAPBUF FSHELPER 19 ?
+FSH_ISCURDIRPREFIX FSHELPER 20 ?
+FSH_LOADCHAR FSHELPER 21 ?
+FSH_PREVCHAR FSHELPER 22 ?
+FSH_STORECHAR FSHELPER 23 ?
+FSH_SEMSET FSHELPER 24 ?
+FSH_SEMSETWAIT FSHELPER 25 ?
+FSH_SEMWAIT FSHELPER 26 ?
+FSH_WILDMATCH FSHELPER 27 ?
+FSH_YIELD FSHELPER 28 ?
+FSH_DOVOLIO2 FSHELPER 29 ?
+FSH_FINDCHAR FSHELPER 30 ?
+MFSH_SETBOOTDRIVE FSHELPER 31 ?
+FSH_CANONICALIZE FSHELPER 32 ?
+MFSH_DOVOLIO FSHELPER 33 ?
+MFSH_INTERR FSHELPER 34 ?
+MFSH_SEGALLOC FSHELPER 35 ?
+MFSH_SEGFREE FSHELPER 36 ?
+MFSH_SEGREALLOC FSHELPER 37 ?
+MFSH_CALLRM FSHELPER 38 ?
+MFSH_LOCK FSHELPER 39 ?
+MFSH_PHYSTOVIRT FSHELPER 40 ?
+MFSH_UNLOCK FSHELPER 41 ?
+MFSH_UNPHYSTOVIRT FSHELPER 42 ?
+MFSH_VIRT2PHYS FSHELPER 43 ?
+FSH_QSYSINFO FSHELPER 44 ?
+FSH_NAMEFROMSFN FSHELPER 45 ?
+FSH_UPPERCASE FSHELPER 46 ?
+FSH_CHECKEANAME FSHELPER 47 ?
+FSH_CALLDRIVER FSHELPER 48 ?
+FSH_SETVOLUME FSHELPER 49 ?
+FSH_STACKSPACE FSHELPER 50 ?
+FSH_REGISTERPERFCTRS FSHELPER 51 ?
+FSH_IOBOOST FSHELPER 52 ?
+FSH_QUERYSERVERTHREAD FSHELPER 53 ?
+FSH_QUERYOPLOCK FSHELPER 54 ?
+FSH_EXTENDTIMESLICE FSHELPER 55 ?
+
diff --git a/src/VBox/Runtime/r0drv/os2/process-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/process-r0drv-os2.cpp
new file mode 100644
index 00000000..585f1143
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/process-r0drv-os2.cpp
@@ -0,0 +1,54 @@
+/* $Id: process-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Process Management, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/process.h>
+#include <iprt/assert.h>
+
+
+/**
+ * Gets the current process identifier.
+ *
+ * Reads pidCurrent from the local info segment (LIS); g_fpLIS is converted
+ * to a flat pointer on every call (required for SMP correctness, see the
+ * note on g_fpLIS in initterm-r0drv-os2.cpp).
+ *
+ * @returns Current PID, or NIL_RTPROCESS if the LIS cannot be resolved.
+ */
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+ PLINFOSEG pLIS = (PLINFOSEG)RTR0Os2Virt2Flat(g_fpLIS);
+ AssertReturn(pLIS, NIL_RTPROCESS);
+ return pLIS->pidCurrent;
+}
+
+
+/**
+ * Gets the ring-0 handle of the current process.
+ * @returns The PID cast to RTR0PROCESS.
+ */
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+ /* make this the PTDA (per-task data area) pointer later... */
+ return (RTR0PROCESS)RTProcSelf();
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/semevent-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/semevent-r0drv-os2.cpp
new file mode 100644
index 00000000..98950c6a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/semevent-r0drv-os2.cpp
@@ -0,0 +1,271 @@
+/* $Id: semevent-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/lockvalidator.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * OS/2 event semaphore.
 *
 * Auto-reset (single release) event built on KernBlock / KernWakeup, with a
 * KEE spinlock serializing all state transitions.
 */
typedef struct RTSEMEVENTINTERNAL
{
    /** Magic value (RTSEMEVENT_MAGIC).  Incremented on destruction so that
     * stale handles fail validation. */
    uint32_t volatile u32Magic;
    /** The number of waiting threads (blocked in KernBlock). */
    uint32_t volatile cWaiters;
    /** Set if the event object is signaled; consumed (cleared) by the next waiter. */
    uint8_t volatile fSignaled;
    /** The number of threads in the process of waking up; when the semaphore
     * is destroyed with threads in flight, the last of these frees the structure. */
    uint32_t volatile cWaking;
    /** The OS/2 spinlock protecting this structure. */
    SpinLock_t Spinlock;
} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
{
    /* Forward to the Ex variant: no flags, no lock-validator class, no name. */
    return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
}
+
+
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+ AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+ AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+ RT_NOREF(hClass, pszNameFmt);
+
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->cWaiters = 0;
+ pThis->cWaking = 0;
+ pThis->fSignaled = 0;
+ KernAllocSpinLock(&pThis->Spinlock);
+
+ *phEventSem = pThis;
+ return VINF_SUCCESS;
+}
+
+
RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
{
    /*
     * Validate the handle; NIL is allowed and treated as a no-op.
     */
    PRTSEMEVENTINTERNAL pThis = hEventSem;
    if (pThis == NIL_RTSEMEVENT)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);
    ASMAtomicIncU32(&pThis->u32Magic); /* make the handle invalid */
    if (pThis->cWaiters > 0)
    {
        /* abort waiting threads with VERR_SEM_DESTROYED; the last one to wake
           up (see rtR0SemEventOs2Wait) frees the structure. */
        ASMAtomicXchgU32(&pThis->cWaking, pThis->cWaking + pThis->cWaiters);
        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_BOOST, &cThreads, (ULONG)VERR_SEM_DESTROYED);
        KernReleaseSpinLock(&pThis->Spinlock);
    }
    else if (pThis->cWaking)
        /* threads are already waking up; the last of them does the cleanup */
        KernReleaseSpinLock(&pThis->Spinlock);
    else
    {
        /* nobody is using the semaphore - free it here */
        KernReleaseSpinLock(&pThis->Spinlock);
        KernFreeSpinLock(&pThis->Spinlock);
        RTMemFree(pThis);
    }

    return VINF_SUCCESS;
}
+
+
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    /*
     * Validate the handle.
     */
    PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);

    if (pThis->cWaiters > 0)
    {
        /* Hand the signal straight to one blocked thread (auto-reset event). */
        ASMAtomicDecU32(&pThis->cWaiters);
        ASMAtomicIncU32(&pThis->cWaking);
        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_ONE, &cThreads, VINF_SUCCESS);
        if (RT_UNLIKELY(!cThreads))
        {
            /* shouldn't ever happen on OS/2: nobody actually woke up, so roll
               the counters back and latch the signal instead. */
            ASMAtomicXchgU8(&pThis->fSignaled, true);
            ASMAtomicDecU32(&pThis->cWaking);
            ASMAtomicIncU32(&pThis->cWaiters);
        }
    }
    else
        /* No waiters: latch the signal for the next wait call to consume. */
        ASMAtomicXchgU8(&pThis->fSignaled, true);

    KernReleaseSpinLock(&pThis->Spinlock);
    return VINF_SUCCESS;
}
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * @returns VBox status code.
+ * @param pThis The event semaphore.
+ * @param fFlags See RTSemEventWaitEx.
+ * @param uTimeout See RTSemEventWaitEx.
+ * @param pSrcPos The source code position of the wait.
+ */
+static int rtR0SemEventOs2Wait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+ PCRTLOCKVALSRCPOS pSrcPos)
+{
+ /*
+ * Validate and convert input.
+ */
+ if (!pThis)
+ return VERR_INVALID_HANDLE;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+
+ ULONG cMsTimeout = rtR0SemWaitOs2ConvertTimeout(fFlags, uTimeout);
+ ULONG fBlock = BLOCK_SPINLOCK;
+ if (!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE))
+ fBlock |= BLOCK_UNINTERRUPTABLE;
+
+ /*
+ * Do the job.
+ */
+ KernAcquireSpinLock(&pThis->Spinlock);
+
+ int rc;
+ if (pThis->fSignaled)
+ {
+ Assert(!pThis->cWaiters);
+ ASMAtomicXchgU8(&pThis->fSignaled, false);
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ ASMAtomicIncU32(&pThis->cWaiters);
+
+ ULONG ulData = (ULONG)VERR_INTERNAL_ERROR;
+ rc = KernBlock((ULONG)pThis, cMsTimeout, fBlock,
+ &pThis->Spinlock,
+ &ulData);
+ switch (rc)
+ {
+ case NO_ERROR:
+ rc = (int)ulData;
+ Assert(rc == VINF_SUCCESS || rc == VERR_SEM_DESTROYED);
+ Assert(pThis->cWaking > 0);
+ if ( !ASMAtomicDecU32(&pThis->cWaking)
+ && pThis->u32Magic != RTSEMEVENT_MAGIC)
+ {
+ /* The event was destroyed (ulData == VINF_SUCCESS if it was after we awoke), as
+ the last thread do the cleanup. */
+ KernReleaseSpinLock(&pThis->Spinlock);
+ KernFreeSpinLock(&pThis->Spinlock);
+ RTMemFree(pThis);
+ return rc;
+ }
+ break;
+
+ case ERROR_TIMEOUT:
+ Assert(cMsTimeout != SEM_INDEFINITE_WAIT);
+ ASMAtomicDecU32(&pThis->cWaiters);
+ rc = VERR_TIMEOUT;
+ break;
+
+ case ERROR_INTERRUPT:
+ Assert(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
+ ASMAtomicDecU32(&pThis->cWaiters);
+ rc = VERR_INTERRUPTED;
+ break;
+
+ default:
+ AssertMsgFailed(("rc=%d\n", rc));
+ rc = VERR_GENERAL_FAILURE;
+ break;
+ }
+ }
+
+ KernReleaseSpinLock(&pThis->Spinlock);
+ return rc;
+}
+
+
RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
{
#ifndef RTSEMEVENT_STRICT
    return rtR0SemEventOs2Wait(hEventSem, fFlags, uTimeout, NULL);
#else
    /* Strict builds record the (API-level) source position for the wait. */
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtR0SemEventOs2Wait(hEventSem, fFlags, uTimeout, &SrcPos);
#endif
}
+
+
RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
                                  RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    /* Capture the caller-supplied id and source position, then forward. */
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtR0SemEventOs2Wait(hEventSem, fFlags, uTimeout, &SrcPos);
}
+
+
RTDECL(uint32_t) RTSemEventGetResolution(void)
{
    /* Timeout resolution in nanoseconds (the OS/2 timer tick is ~32ms). */
    return 32000000; /* 32ms */
}
+
diff --git a/src/VBox/Runtime/r0drv/os2/semeventmulti-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/semeventmulti-r0drv-os2.cpp
new file mode 100644
index 00000000..c7f8bfbd
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/semeventmulti-r0drv-os2.cpp
@@ -0,0 +1,281 @@
+/* $Id: semeventmulti-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * OS/2 multiple release event semaphore.
 *
 * Manual-reset event built on KernBlock / KernWakeup, with a KEE spinlock
 * serializing all state transitions.
 */
typedef struct RTSEMEVENTMULTIINTERNAL
{
    /** Magic value (RTSEMEVENTMULTI_MAGIC).  Incremented on destruction so
     * that stale handles fail validation. */
    uint32_t volatile u32Magic;
    /** The number of waiting threads (blocked in KernBlock). */
    uint32_t volatile cWaiters;
    /** Set if the event object is signaled; stays set until RTSemEventMultiReset. */
    uint8_t volatile fSignaled;
    /** The number of threads in the process of waking up; when the semaphore
     * is destroyed with threads in flight, the last of these frees the structure. */
    uint32_t volatile cWaking;
    /** The OS/2 spinlock protecting this structure. */
    SpinLock_t Spinlock;
} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
{
    /* Forward to the Ex variant: no flags, no lock-validator class, no name. */
    return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
}
+
+
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+ const char *pszNameFmt, ...)
+{
+ AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
+
+ AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
+ PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMEVENTMULTI_MAGIC;
+ pThis->cWaiters = 0;
+ pThis->cWaking = 0;
+ pThis->fSignaled = 0;
+ KernAllocSpinLock(&pThis->Spinlock);
+
+ *phEventMultiSem = pThis;
+ return VINF_SUCCESS;
+ }
+ RT_NOREF(hClass, pszNameFmt);
+ return VERR_NO_MEMORY;
+}
+
+
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    /*
     * Validate the handle; NIL is allowed and treated as a no-op.
     */
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);
    ASMAtomicIncU32(&pThis->u32Magic); /* make the handle invalid */
    if (pThis->cWaiters > 0)
    {
        /* abort waiting threads with VERR_SEM_DESTROYED; the last one to wake
           up (see rtR0SemEventMultiOs2Wait) frees the structure. */
        ASMAtomicXchgU32(&pThis->cWaking, pThis->cWaking + pThis->cWaiters);
        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_BOOST, &cThreads, (ULONG)VERR_SEM_DESTROYED);
        KernReleaseSpinLock(&pThis->Spinlock);
    }
    else if (pThis->cWaking)
        /* threads are already waking up; the last of them does the cleanup */
        KernReleaseSpinLock(&pThis->Spinlock);
    else
    {
        /* nobody is using the semaphore - free it here */
        KernReleaseSpinLock(&pThis->Spinlock);
        KernFreeSpinLock(&pThis->Spinlock);
        RTMemFree(pThis);
    }

    return VINF_SUCCESS;
}
+
+
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    /*
     * Validate the handle.
     */
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);

    /* Latch the signal (manual-reset: stays set until RTSemEventMultiReset). */
    ASMAtomicXchgU8(&pThis->fSignaled, true);
    if (pThis->cWaiters > 0)
    {
        /* Release all waiters at once; they all move to the waking state. */
        ASMAtomicXchgU32(&pThis->cWaking, pThis->cWaking + pThis->cWaiters);
        ASMAtomicXchgU32(&pThis->cWaiters, 0);
        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA, &cThreads, VINF_SUCCESS);
    }

    KernReleaseSpinLock(&pThis->Spinlock);
    return VINF_SUCCESS;
}
+
+
RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    /*
     * Validate the handle.
     */
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    /* Clear the signaled state; threads already woken stay woken. */
    KernAcquireSpinLock(&pThis->Spinlock);
    ASMAtomicXchgU8(&pThis->fSignaled, false);
    KernReleaseSpinLock(&pThis->Spinlock);
    return VINF_SUCCESS;
}
+
+
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis       The event semaphore.
 * @param   fFlags      See RTSemEventMultiWaitEx.
 * @param   uTimeout    See RTSemEventMultiWaitEx.
 * @param   pSrcPos     The source code position of the wait (unused here;
 *                      no lock validation on OS/2).
 */
static int rtR0SemEventMultiOs2Wait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                    PCRTLOCKVALSRCPOS pSrcPos)
{
    RT_NOREF(pSrcPos);

    /*
     * Validate and convert the input.
     */
    if (!pThis)
        return VERR_INVALID_HANDLE;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    ULONG cMsTimeout = rtR0SemWaitOs2ConvertTimeout(fFlags, uTimeout);
    ULONG fBlock = BLOCK_SPINLOCK; /* KernBlock drops the spinlock while blocked and retakes it on return. */
    if (!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE))
        fBlock |= BLOCK_UNINTERRUPTABLE;

    /*
     * Do the job.
     */
    KernAcquireSpinLock(&pThis->Spinlock);

    int rc;
    if (pThis->fSignaled)
        /* Manual-reset: a signaled event satisfies waiters without consuming it. */
        rc = VINF_SUCCESS;
    else
    {
        ASMAtomicIncU32(&pThis->cWaiters);

        ULONG ulData = (ULONG)VERR_INTERNAL_ERROR;
        rc = KernBlock((ULONG)pThis, cMsTimeout, fBlock,
                       &pThis->Spinlock,
                       &ulData);
        switch (rc)
        {
            case NO_ERROR:
                /* Woken by RTSemEventMultiSignal or RTSemEventMultiDestroy;
                   the waker passed the IPRT status in ulData. */
                rc = (int)ulData;
                Assert(rc == VINF_SUCCESS || rc == VERR_SEM_DESTROYED);
                Assert(pThis->cWaking > 0);
                if (   !ASMAtomicDecU32(&pThis->cWaking)
                    && pThis->u32Magic != RTSEMEVENTMULTI_MAGIC)
                {
                    /* The event was destroyed (ulData == VINF_SUCCESS if it was after we awoke);
                       as the last waking thread we do the cleanup.
                       NOTE(review): VINF_SUCCESS is returned here and below even when
                       ulData was VERR_SEM_DESTROYED, unlike the single-release variant
                       (rtR0SemEventOs2Wait) which returns rc - confirm intentional. */
                    KernReleaseSpinLock(&pThis->Spinlock);
                    KernFreeSpinLock(&pThis->Spinlock);
                    RTMemFree(pThis);
                    return VINF_SUCCESS;
                }
                rc = VINF_SUCCESS;
                break;

            case ERROR_TIMEOUT:
                Assert(cMsTimeout != SEM_INDEFINITE_WAIT);
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = VERR_TIMEOUT;
                break;

            case ERROR_INTERRUPT:
                Assert(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = VERR_INTERRUPTED;
                break;

            default:
                AssertMsgFailed(("rc=%d\n", rc));
                rc = VERR_GENERAL_FAILURE;
                break;
        }
    }

    KernReleaseSpinLock(&pThis->Spinlock);
    return rc;
}
+
+
RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
{
#ifndef RTSEMEVENT_STRICT
    return rtR0SemEventMultiOs2Wait(hEventMultiSem, fFlags, uTimeout, NULL);
#else
    /* Strict builds record the (API-level) source position for the wait. */
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return rtR0SemEventMultiOs2Wait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
#endif
}
+
+
RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
                                       RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    /* Capture the caller-supplied id and source position, then forward. */
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtR0SemEventMultiOs2Wait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
}
+
+
RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
{
    /* Timeout resolution in nanoseconds (the OS/2 timer tick is ~32ms). */
    return 32000000; /* 32ms */
}
+
diff --git a/src/VBox/Runtime/r0drv/os2/semfastmutex-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/semfastmutex-r0drv-os2.cpp
new file mode 100644
index 00000000..c00a4233
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/semfastmutex-r0drv-os2.cpp
@@ -0,0 +1,115 @@
+/* $Id: semfastmutex-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/semaphore.h>
+#include <iprt/errcore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * Wrapper for the OS/2 KEE mutex semaphore.
 *
 * Thin shim: all blocking/ownership behavior comes from the KEE MutexLock_t.
 */
typedef struct RTSEMFASTMUTEXINTERNAL
{
    /** Magic value (RTSEMFASTMUTEX_MAGIC); set to RTSEMFASTMUTEX_MAGIC_DEAD
     * on destruction. */
    uint32_t u32Magic;
    /** The KEE mutex. */
    MutexLock_t Mtx;
} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+ AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+ AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+
+ PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (pThis)
+ {
+ pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+ KernAllocMutexLock(&pThis->Mtx);
+
+ *phFastMtx = pThis;
+ return VINF_SUCCESS;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
{
    /*
     * Validate the handle; NIL is allowed and treated as a no-op.
     */
    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
    if (pThis == NIL_RTSEMFASTMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);

    /*
     * Invalidate the magic, free the KEE lock and the wrapper.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
    KernFreeMutexLock(&pThis->Mtx);
    RTMemFree(pThis);

    return VINF_SUCCESS;
}
+
+
RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
{
    /*
     * Validate the handle, then take the KEE mutex (blocks until owned).
     */
    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);

    KernRequestExclusiveMutex(&pThis->Mtx);
    return VINF_SUCCESS;
}
+
+
RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
{
    /*
     * Validate the handle, then release the KEE mutex.
     */
    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);

    KernReleaseExclusiveMutex(&pThis->Mtx);
    return VINF_SUCCESS;
}
+
diff --git a/src/VBox/Runtime/r0drv/os2/spinlock-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/spinlock-r0drv-os2.cpp
new file mode 100644
index 00000000..a83f8839
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/spinlock-r0drv-os2.cpp
@@ -0,0 +1,131 @@
+/* $Id: spinlock-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/spinlock.h>
+#include <iprt/errcore.h>
+#include <iprt/alloc.h>
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#ifdef RT_STRICT
+# include <iprt/asm-amd64-x86.h>
+#endif
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * Wrapper for the SpinLock_t type.
 */
typedef struct RTSPINLOCKINTERNAL
{
    /** Spinlock magic value (RTSPINLOCK_MAGIC); incremented on destruction
     * so stale handles fail validation. */
    uint32_t volatile u32Magic;
    /** Spinlock creation flags (RTSPINLOCK_FLAGS_INTERRUPT_SAFE/UNSAFE). */
    uint32_t fFlags;
    /** The OS/2 spinlock structure. */
    SpinLock_t Spinlock;
} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+ AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+ RT_NOREF(pszName);
+
+ /*
+ * Allocate.
+ */
+ AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize & return.
+ */
+ pThis->u32Magic = RTSPINLOCK_MAGIC;
+ pThis->fFlags = fFlags;
+ KernAllocSpinLock(&pThis->Spinlock);
+ *pSpinlock = pThis;
+ return VINF_SUCCESS;
+}
+
+
RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
{
    /*
     * Validate input.
     */
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    if (!pThis)
        return VERR_INVALID_PARAMETER;
    AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
                    ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_PARAMETER);

    /*
     * Make the lock invalid and release the memory.
     */
    ASMAtomicIncU32(&pThis->u32Magic);
    KernFreeSpinLock(&pThis->Spinlock);
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
+
+
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    /*
     * Validate (strict builds only) and take the KEE spinlock.
     */
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    KernAcquireSpinLock(&pThis->Spinlock);
    Assert(!ASMIntAreEnabled()); /** @todo verify that interrupts are disabled. */
}
+
+
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    /*
     * Validate (strict builds only) and release the KEE spinlock.
     */
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    KernReleaseSpinLock(&pThis->Spinlock);
}
+
diff --git a/src/VBox/Runtime/r0drv/os2/the-os2-kernel.h b/src/VBox/Runtime/r0drv/os2/the-os2-kernel.h
new file mode 100644
index 00000000..bd4a0c94
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/the-os2-kernel.h
@@ -0,0 +1,59 @@
+/* $Id: the-os2-kernel.h $ */
+/** @file
+ * IPRT - Ring-0 Driver, The OS/2 Kernel Headers.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_os2_the_os2_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_os2_the_os2_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/types.h>
+
+#define INCL_ERRORS
+#define INCL_DOSSEMAPHORES /* for SEM_INDEFINITE_WAIT */
+#undef RT_MAX
+#include <os2ddk/bsekee.h>
+#include <os2ddk/devhlp.h>
+#undef RT_MAX
+
+RT_C_DECLS_BEGIN
+
+extern PCDOSTABLE g_pDosTable;
+extern PCDOSTABLE2 g_pDosTable2;
+extern PGINFOSEG g_pGIS;
+extern RTFAR16 g_fpLIS;
+
+RTR0DECL(void *) RTR0Os2Virt2Flat(RTFAR16 fp);
+DECLASM(int) RTR0Os2DHQueryDOSVar(uint8_t iVar, uint16_t iSub, PRTFAR16 pfp);
+DECLASM(int) RTR0Os2DHVMGlobalToProcess(ULONG fFlags, PVOID pvR0, ULONG cb, PPVOID ppvR3);
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_os2_the_os2_kernel_h */
diff --git a/src/VBox/Runtime/r0drv/os2/thread-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/thread-r0drv-os2.cpp
new file mode 100644
index 00000000..5910d0c3
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/thread-r0drv-os2.cpp
@@ -0,0 +1,191 @@
+/* $Id: thread-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Threads (Part 1), Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mp.h>
+#include "internal/thread.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Per-cpu preemption counters. */
+static int32_t volatile g_acPreemptDisabled[256];
+
+
+
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+ PLINFOSEG pLIS = (PLINFOSEG)RTR0Os2Virt2Flat(g_fpLIS);
+ AssertMsgReturn(pLIS, ("g_fpLIS=%04x:%04x - logging too early again?\n", g_fpLIS.sel, g_fpLIS.off), NIL_RTNATIVETHREAD);
+ return pLIS->tidCurrent | (pLIS->pidCurrent << 16);
+}
+
+
+static int rtR0ThreadOs2SleepCommon(RTMSINTERVAL cMillies)
+{
+ int rc = KernBlock((ULONG)RTThreadSleep,
+ cMillies == RT_INDEFINITE_WAIT ? SEM_INDEFINITE_WAIT : cMillies,
+ 0, NULL, NULL);
+ switch (rc)
+ {
+ case NO_ERROR:
+ return VINF_SUCCESS;
+ case ERROR_TIMEOUT:
+ return VERR_TIMEOUT;
+ case ERROR_INTERRUPT:
+ return VERR_INTERRUPTED;
+ default:
+ AssertMsgFailed(("%d\n", rc));
+ return VERR_NO_TRANSLATION;
+ }
+}
+
+
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadOs2SleepCommon(cMillies);
+}
+
+
+RTDECL(int) RTThreadSleepNoBlock(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadOs2SleepCommon(cMillies);
+}
+
+
+RTDECL(bool) RTThreadYield(void)
+{
+ /** @todo implement me (requires a devhelp) */
+ return false;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+ int32_t c = g_acPreemptDisabled[ASMGetApicId()];
+ AssertMsg(c >= 0 && c < 32, ("%d\n", c));
+ return c == 0
+ && ASMIntAreEnabled();
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+
+ union
+ {
+ RTFAR16 fp;
+ uint8_t fResched;
+ } u;
+ int rc = RTR0Os2DHQueryDOSVar(DHGETDOSV_YIELDFLAG, 0, &u.fp);
+ AssertReturn(rc == 0, false);
+ if (u.fResched)
+ return true;
+
+ /** @todo Check if DHGETDOSV_YIELDFLAG includes TCYIELDFLAG. */
+ rc = RTR0Os2DHQueryDOSVar(DHGETDOSV_TCYIELDFLAG, 0, &u.fp);
+ AssertReturn(rc == 0, false);
+ if (u.fResched)
+ return true;
+ return false;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+ /* yes, RTThreadPreemptIsPending is reliable. */
+ return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+ /* no kernel preemption on OS/2. */
+ return false;
+}
+
+
+RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
+{
+ AssertPtr(pState);
+ Assert(pState->u32Reserved == 0);
+
+ /* No preemption on OS/2, so do our own accounting. */
+ int32_t c = ASMAtomicIncS32(&g_acPreemptDisabled[ASMGetApicId()]);
+ AssertMsg(c > 0 && c < 32, ("%d\n", c));
+ pState->u32Reserved = c;
+ RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
+}
+
+
+RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
+{
+ AssertPtr(pState);
+ AssertMsg(pState->u32Reserved > 0 && pState->u32Reserved < 32, ("%d\n", pState->u32Reserved));
+ RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
+
+ /* No preemption on OS/2, so do our own accounting. */
+ int32_t volatile *pc = &g_acPreemptDisabled[ASMGetApicId()];
+ AssertMsg(pState->u32Reserved == (uint32_t)*pc, ("uchDummy=%d *pc=%d \n", pState->u32Reserved, *pc));
+ ASMAtomicUoWriteS32(pc, pState->u32Reserved - 1);
+ pState->u32Reserved = 0;
+}
+
+
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
+
+ union
+ {
+ RTFAR16 fp;
+ uint8_t cInterruptLevel;
+ } u;
+ /** @todo OS/2: verify the usage of DHGETDOSV_INTERRUPTLEV. */
+ int rc = RTR0Os2DHQueryDOSVar(DHGETDOSV_INTERRUPTLEV, 0, &u.fp);
+ AssertReturn(rc == 0, true);
+
+ return u.cInterruptLevel > 0;
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/thread2-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/thread2-r0drv-os2.cpp
new file mode 100644
index 00000000..41c2634e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/thread2-r0drv-os2.cpp
@@ -0,0 +1,86 @@
+/* $Id: thread2-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include "internal/thread.h"
+
+
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+ return rtThreadGetByNative(RTThreadNativeSelf());
+}
+
+
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+ NOREF(pThread);
+ NOREF(enmType);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+ NOREF(pThread);
+ return VERR_NOT_IMPLEMENTED;
+}
+
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+ NOREF(pThread);
+}
+
+
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+ NOREF(pThread);
+}
+
+
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+ NOREF(pNativeThread);
+ NOREF(pThreadInt);
+ return VERR_NOT_IMPLEMENTED;
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/time-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/time-r0drv-os2.cpp
new file mode 100644
index 00000000..de749de8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/time-r0drv-os2.cpp
@@ -0,0 +1,92 @@
+/* $Id: time-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/time.h>
+
+
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+ /** @remark OS/2 Ring-0: will wrap after 48 days. */
+ return g_pGIS->msecs * UINT64_C(1000000);
+}
+
+
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+ /** @remark OS/2 Ring-0: will wrap after 48 days. */
+ return g_pGIS->msecs;
+}
+
+
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+ /** @remark OS/2 Ring-0: will wrap after 48 days. */
+ return g_pGIS->msecs * UINT64_C(1000000);
+}
+
+
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+ /** @remark OS/2 Ring-0: will wrap after 48 days. */
+ return g_pGIS->msecs;
+}
+
+
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+    /*
+     * Get the seconds since the unix epoch (local time) and current hundredths.
+     * Seqlock-style read: re-read until both fields are stable, so we never
+     * combine a pre-tick seconds value with a post-tick hundredths value.
+     */
+    GINFOSEG volatile *pGIS = (GINFOSEG volatile *)g_pGIS;
+    UCHAR uchHundredths;
+    ULONG ulSeconds;
+    do
+    {
+        uchHundredths = pGIS->hundredths;
+        ulSeconds     = pGIS->time;
+    } while (   uchHundredths != pGIS->hundredths
+             || ulSeconds     != pGIS->time);
+
+    /*
+     * Combine the two and convert to UTC (later).
+     */
+    uint64_t u64 = ulSeconds * UINT64_C(1000000000) + (uint32_t)uchHundredths * 10000000;
+    /** @todo convert from local to UTC. */
+    /** @remark OS/2 Ring-0: Currently returns local time instead of UTC. */
+    return RTTimeSpecSetNano(pTime, u64);
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/timer-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/timer-r0drv-os2.cpp
new file mode 100644
index 00000000..08f8bc55
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/timer-r0drv-os2.cpp
@@ -0,0 +1,385 @@
+/* $Id: timer-r0drv-os2.cpp $ */
+/** @file
+ * IPRT - Timers, Ring-0 Driver, OS/2.
+ */
+
+/*
+ * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-os2-kernel.h"
+
+#include <iprt/timer.h>
+#include <iprt/time.h>
+#include <iprt/spinlock.h>
+#include <iprt/err.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal representation of an OS/2 timer handle.
+ */
+typedef struct RTTIMER
+{
+ /** Magic.
+ * This is RTTIMER_MAGIC, but changes to something else before the timer
+ * is destroyed to indicate clearly that thread should exit. */
+ uint32_t volatile u32Magic;
+ /** The next timer in the timer list. */
+ PRTTIMER pNext;
+ /** Flag indicating the timer is suspended. */
+ uint8_t volatile fSuspended;
+ /** Cleared at the start of timer processing, set when calling pfnTimer.
+     * If any timer change occurs while doing the callback, this will be used to resume the cycle. */
+ bool fDone;
+ /** Callback. */
+ PFNRTTIMER pfnTimer;
+ /** User argument. */
+ void *pvUser;
+ /** The timer interval. 0 if one-shot. */
+ uint64_t u64NanoInterval;
+ /** The start of the current run.
+ * This is used to calculate when the timer ought to fire the next time. */
+ uint64_t volatile u64StartTS;
+    /** The timestamp at which the timer should fire next.
+     * Advanced by u64NanoInterval from u64StartTS for each tick of a periodic timer. */
+ uint64_t volatile u64NextTS;
+ /** The current tick number (since u64StartTS). */
+ uint64_t volatile iTick;
+} RTTIMER;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Spinlock protecting the timers. */
+static RTSPINLOCK g_Spinlock = NIL_RTSPINLOCK;
+/** The timer head. */
+static PRTTIMER volatile g_pTimerHead = NULL;
+/** The number of active timers. */
+static uint32_t volatile g_cActiveTimers = 0;
+/** The total number of timers (both active and suspended). */
+static uint32_t volatile g_cTimers = 0;
+/** The change number.
+ * This is used to detect list changes during the timer callback loop. */
+static uint32_t volatile g_u32ChangeNo;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN
+DECLASM(void) rtTimerOs2Tick(void);
+DECLASM(int) rtTimerOs2Arm(void);
+DECLASM(int) rtTimerOs2Dearm(void);
+RT_C_DECLS_END
+
+
+
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+ *ppTimer = NULL;
+
+ /*
+ * We don't support the fancy MP features.
+ */
+ if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+ return VERR_NOT_SUPPORTED;
+
+ /*
+ * Lazy initialize the spinlock.
+ */
+ if (g_Spinlock == NIL_RTSPINLOCK)
+ {
+ RTSPINLOCK Spinlock;
+ int rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerOS2");
+ AssertRCReturn(rc, rc);
+ //bool fRc;
+ //ASMAtomicCmpXchgSize(&g_Spinlock, Spinlock, NIL_RTSPINLOCK, fRc);
+ //if (!fRc)
+ if (!ASMAtomicCmpXchgPtr((void * volatile *)&g_Spinlock, Spinlock, NIL_RTSPINLOCK))
+ RTSpinlockDestroy(Spinlock);
+ }
+
+ /*
+ * Allocate and initialize the timer handle.
+ */
+ PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer));
+ if (!pTimer)
+ return VERR_NO_MEMORY;
+
+ pTimer->u32Magic = RTTIMER_MAGIC;
+ pTimer->pNext = NULL;
+ pTimer->fSuspended = true;
+ pTimer->pfnTimer = pfnTimer;
+ pTimer->pvUser = pvUser;
+ pTimer->u64NanoInterval = u64NanoInterval;
+ pTimer->u64StartTS = 0;
+
+ /*
+ * Insert the timer into the list (LIFO atm).
+ */
+ RTSpinlockAcquire(g_Spinlock);
+ g_u32ChangeNo++;
+ pTimer->pNext = g_pTimerHead;
+ g_pTimerHead = pTimer;
+ g_cTimers++;
+ RTSpinlockRelease(g_Spinlock);
+
+ *ppTimer = pTimer;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Validates the timer handle.
+ *
+ * @returns true if valid, false if invalid.
+ * @param pTimer The handle.
+ */
+DECLINLINE(bool) rtTimerIsValid(PRTTIMER pTimer)
+{
+ AssertReturn(VALID_PTR(pTimer), false);
+ AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, false);
+ return true;
+}
+
+
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+ /* It's ok to pass NULL pointer. */
+ if (pTimer == /*NIL_RTTIMER*/ NULL)
+ return VINF_SUCCESS;
+ if (!rtTimerIsValid(pTimer))
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * Remove it from the list.
+ */
+ RTSpinlockAcquire(g_Spinlock);
+ g_u32ChangeNo++;
+ if (g_pTimerHead == pTimer)
+ g_pTimerHead = pTimer->pNext;
+ else
+ {
+ PRTTIMER pPrev = g_pTimerHead;
+ while (pPrev->pNext != pTimer)
+ {
+ pPrev = pPrev->pNext;
+ if (RT_UNLIKELY(!pPrev))
+ {
+ RTSpinlockRelease(g_Spinlock);
+ return VERR_INVALID_HANDLE;
+ }
+ }
+ pPrev->pNext = pTimer->pNext;
+ }
+ Assert(g_cTimers > 0);
+ g_cTimers--;
+ if (!pTimer->fSuspended)
+ {
+ Assert(g_cActiveTimers > 0);
+ g_cActiveTimers--;
+ if (!g_cActiveTimers)
+ rtTimerOs2Dearm();
+ }
+ RTSpinlockRelease(g_Spinlock);
+
+ /*
+ * Free the associated resources.
+ */
+ pTimer->u32Magic++;
+ RTMemFree(pTimer);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
+{
+ if (!rtTimerIsValid(pTimer))
+ return VERR_INVALID_HANDLE;
+ if (!pTimer->fSuspended)
+ return VERR_TIMER_ACTIVE;
+
+ /*
+     * Calc when it should start firing, then arm the OS/2 timer if this is the first active one.
+ */
+ u64First += RTTimeNanoTS();
+
+ RTSpinlockAcquire(g_Spinlock);
+ g_u32ChangeNo++;
+ if (!g_cActiveTimers)
+ {
+ int rc = rtTimerOs2Arm();
+ if (RT_FAILURE(rc))
+ {
+ RTSpinlockRelease(g_Spinlock);
+ return rc;
+ }
+ }
+ g_cActiveTimers++;
+ pTimer->fSuspended = false;
+ pTimer->fDone = true; /* next tick, not current! */
+ pTimer->iTick = 0;
+ pTimer->u64StartTS = u64First;
+ pTimer->u64NextTS = u64First;
+ RTSpinlockRelease(g_Spinlock);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+ if (!rtTimerIsValid(pTimer))
+ return VERR_INVALID_HANDLE;
+ if (pTimer->fSuspended)
+ return VERR_TIMER_SUSPENDED;
+
+ /*
+ * Suspend the timer.
+ */
+ RTSpinlockAcquire(g_Spinlock);
+ g_u32ChangeNo++;
+ pTimer->fSuspended = true;
+ Assert(g_cActiveTimers > 0);
+ g_cActiveTimers--;
+ if (!g_cActiveTimers)
+ rtTimerOs2Dearm();
+ RTSpinlockRelease(g_Spinlock);
+
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
+{
+ if (!rtTimerIsValid(pTimer))
+ return VERR_INVALID_HANDLE;
+ RT_NOREF(u64NanoInterval);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+DECLASM(void) rtTimerOs2Tick(void)
+{
+ /*
+ * Query the current time and then take the lock.
+ */
+ const uint64_t u64NanoTS = RTTimeNanoTS();
+
+ RTSpinlockAcquire(g_Spinlock);
+
+ /*
+ * Clear the fDone flag.
+ */
+ PRTTIMER pTimer;
+ for (pTimer = g_pTimerHead; pTimer; pTimer = pTimer->pNext)
+ pTimer->fDone = false;
+
+ /*
+ * Walk the timer list and do the callbacks for any active timer.
+ */
+ uint32_t u32CurChangeNo = g_u32ChangeNo;
+ pTimer = g_pTimerHead;
+ while (pTimer)
+ {
+ PRTTIMER pNext = pTimer->pNext;
+ if ( !pTimer->fSuspended
+ && !pTimer->fDone
+ && pTimer->u64NextTS <= u64NanoTS)
+ {
+ pTimer->fDone = true;
+ pTimer->iTick++;
+
+ /* calculate the next timeout */
+ if (!pTimer->u64NanoInterval)
+ pTimer->fSuspended = true;
+ else
+ {
+ pTimer->u64NextTS = pTimer->u64StartTS + pTimer->iTick * pTimer->u64NanoInterval;
+ if (pTimer->u64NextTS < u64NanoTS)
+ pTimer->u64NextTS = u64NanoTS + RTTimerGetSystemGranularity() / 2;
+ }
+
+ /* do the callout */
+ PFNRTTIMER pfnTimer = pTimer->pfnTimer;
+ void *pvUser = pTimer->pvUser;
+ RTSpinlockRelease(g_Spinlock);
+ pfnTimer(pTimer, pvUser, pTimer->iTick);
+
+ RTSpinlockAcquire(g_Spinlock);
+
+ /* check if anything changed. */
+ if (u32CurChangeNo != g_u32ChangeNo)
+ {
+ u32CurChangeNo = g_u32ChangeNo;
+ pNext = g_pTimerHead;
+ }
+ }
+
+ /* next */
+ pTimer = pNext;
+ }
+
+ RTSpinlockRelease(g_Spinlock);
+}
+
+
+RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
+{
+ return 32000000; /* 32ms */
+}
+
+
+RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
+{
+ RT_NOREF(u32Request, pu32Granted);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
+{
+ RT_NOREF(u32Granted);
+ return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(bool) RTTimerCanDoHighResolution(void)
+{
+ return false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/os2/timerA-r0drv-os2.asm b/src/VBox/Runtime/r0drv/os2/timerA-r0drv-os2.asm
new file mode 100644
index 00000000..474f8576
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/os2/timerA-r0drv-os2.asm
@@ -0,0 +1,218 @@
+; $Id: timerA-r0drv-os2.asm $
+;; @file
; IPRT - DevHlp_SetTimer/ResetTimer wrappers and timer tick thunk, Ring-0 Driver, OS/2.
+;
+
+;
+; Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
+;
+; Permission is hereby granted, free of charge, to any person
+; obtaining a copy of this software and associated documentation
+; files (the "Software"), to deal in the Software without
+; restriction, including without limitation the rights to use,
+; copy, modify, merge, publish, distribute, sublicense, and/or sell
+; copies of the Software, and to permit persons to whom the
+; Software is furnished to do so, subject to the following
+; conditions:
+;
+; The above copyright notice and this permission notice shall be
+; included in all copies or substantial portions of the Software.
+;
+; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+; OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+; FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+; OTHER DEALINGS IN THE SOFTWARE.
+;
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%define RT_INCL_16BIT_SEGMENTS
+%include "iprt/asmdefs.mac"
+%include "iprt/err.mac"
+
+
+;*******************************************************************************
+;* External Symbols *
+;*******************************************************************************
+extern KernThunkStackTo32
+extern KernThunkStackTo16
+extern NAME(rtTimerOs2Tick)
+extern NAME(RTErrConvertFromOS2)
+BEGINDATA16
+extern NAME(g_fpfnDevHlp)
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%define DevHlp_SetTimer 01dh
+%define DevHlp_ResetTimer 01eh
+
+
+BEGINCODE
+
+;;
; Arms our OS/2 timer.
+;
+; @returns IPRT status code.
+;
+BEGINPROC_EXPORTED rtTimerOs2Arm
+ call KernThunkStackTo16
+ push ebp
+ mov ebp, esp
+
+ ; jump to the 16-bit code.
+ ;jmp far dword NAME(rtTimerOs2Arm_16) wrt CODE16
+ db 066h
+ db 0eah
+ dw NAME(rtTimerOs2Arm_16) wrt CODE16
+ dw CODE16
+BEGINCODE16
+GLOBALNAME rtTimerOs2Arm_16
+
+ ; setup and do the call
+ push ds
+ push es
+ mov dx, DATA16
+ mov ds, dx
+ mov es, dx
+
+ mov ax, NAME(rtTimerOs2TickAsm) wrt CODE16
+ mov dl, DevHlp_SetTimer
+ call far [NAME(g_fpfnDevHlp)]
+
+ pop es
+ pop ds
+
+ ;jmp far dword NAME(rtTimerOs2Arm_32) wrt FLAT
+ db 066h
+ db 0eah
+ dd NAME(rtTimerOs2Arm_32) ;wrt FLAT
+ dw TEXT32 wrt FLAT
+BEGINCODE
+GLOBALNAME rtTimerOs2Arm_32
+ jc .error
+ xor eax, eax
+.done:
+
+ leave
+ push eax
+ call KernThunkStackTo32
+ pop eax
+ ret
+
+ ; convert the error code.
+.error:
+ and eax, 0ffffh
+ call NAME(RTErrConvertFromOS2)
+ jmp .done
+ENDPROC rtTimerOs2Arm
+
+
+;;
; Disarms our OS/2 timer.
+;
+; @returns IPRT status code.
+;
+BEGINPROC_EXPORTED rtTimerOs2Dearm
+ call KernThunkStackTo16
+ push ebp
+ mov ebp, esp
+
+ ; jump to the 16-bit code.
+ ;jmp far dword NAME(rtTimerOs2Dearm_16) wrt CODE16
+ db 066h
+ db 0eah
+ dw NAME(rtTimerOs2Dearm_16) wrt CODE16
+ dw CODE16
+BEGINCODE16
+GLOBALNAME rtTimerOs2Dearm_16
+
+ ; setup and do the call
+ push ds
+ push es
+ mov dx, DATA16
+ mov ds, dx
+ mov es, dx
+
+ mov ax, NAME(rtTimerOs2TickAsm) wrt CODE16
+ mov dl, DevHlp_ResetTimer
+ call far [NAME(g_fpfnDevHlp)]
+
+ pop es
+ pop ds
+
+ ;jmp far dword NAME(rtTimerOs2Dearm_32) wrt FLAT
+ db 066h
+ db 0eah
+ dd NAME(rtTimerOs2Dearm_32) ;wrt FLAT
+ dw TEXT32 wrt FLAT
+BEGINCODE
+GLOBALNAME rtTimerOs2Dearm_32
+ jc .error
+ xor eax, eax
+.done:
+
+ ; epilogue
+ leave
+ push eax
+ call KernThunkStackTo32
+ pop eax
+ ret
+
+ ; convert the error code.
+.error:
+ and eax, 0ffffh
+ call NAME(RTErrConvertFromOS2)
+ jmp .done
+ENDPROC rtTimerOs2Dearm
+
+
+BEGINCODE16
+
+;;
+; OS/2 timer tick callback.
+;
+BEGINPROC rtTimerOs2TickAsm
+ push ds
+ push es
+ push ecx
+ push edx
+ push eax
+
+ mov ax, DATA32 wrt FLAT
+ mov ds, ax
+ mov es, ax
+
+ ;jmp far dword NAME(rtTimerOs2TickAsm_32) wrt FLAT
+ db 066h
+ db 0eah
+ dd NAME(rtTimerOs2TickAsm_32) ;wrt FLAT
+ dw TEXT32 wrt FLAT
+BEGINCODE
+GLOBALNAME rtTimerOs2TickAsm_32
+ call KernThunkStackTo32
+ call NAME(rtTimerOs2Tick)
+ call KernThunkStackTo16
+
+ ;jmp far dword NAME(rtTimerOs2TickAsm_16) wrt CODE16
+ db 066h
+ db 0eah
+ dw NAME(rtTimerOs2TickAsm_16) wrt CODE16
+ dw CODE16
+BEGINCODE16
+GLOBALNAME rtTimerOs2TickAsm_16
+
+ pop eax
+ pop edx
+ pop ecx
+ pop es
+ pop ds
+ retf
+ENDPROC rtTimerOs2TickAsm
diff --git a/src/VBox/Runtime/r0drv/power-r0drv.h b/src/VBox/Runtime/r0drv/power-r0drv.h
new file mode 100644
index 00000000..cf4bb838
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/power-r0drv.h
@@ -0,0 +1,44 @@
+/* $Id: power-r0drv.h $ */
+/** @file
+ * IPRT - Power Management, Ring-0 Driver, Internal Header.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_power_r0drv_h
+#define IPRT_INCLUDED_SRC_r0drv_power_r0drv_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/power.h>
+
+RT_C_DECLS_BEGIN
+
+/* Called from initterm-r0drv.cpp: */
+DECLHIDDEN(int) rtR0PowerNotificationInit(void);
+DECLHIDDEN(void) rtR0PowerNotificationTerm(void);
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_power_r0drv_h */
+
diff --git a/src/VBox/Runtime/r0drv/powernotification-r0drv.c b/src/VBox/Runtime/r0drv/powernotification-r0drv.c
new file mode 100644
index 00000000..71e3c5ca
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/powernotification-r0drv.c
@@ -0,0 +1,318 @@
+/* $Id: powernotification-r0drv.c $ */
+/** @file
+ * IPRT - Power Management, Ring-0 Driver, Event Notifications.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/power.h>
+#include "internal/iprt.h"
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/spinlock.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include "r0drv/mp-r0drv.h"
+#include "r0drv/power-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Notification registration record tracking
+ * RTPowerRegisterNotification() calls.
+ */
+typedef struct RTPOWERNOTIFYREG
+{
+ /** Pointer to the next record. */
+ struct RTPOWERNOTIFYREG * volatile pNext;
+ /** The callback. */
+ PFNRTPOWERNOTIFICATION pfnCallback;
+ /** The user argument. */
+ void *pvUser;
+ /** Bit mask indicating whether we've done this callback or not. */
+ uint8_t bmDone[sizeof(void *)];
+} RTPOWERNOTIFYREG;
+/** Pointer to a registration record. */
+typedef RTPOWERNOTIFYREG *PRTPOWERNOTIFYREG;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** The spinlock protecting the list. */
+static RTSPINLOCK volatile g_hRTPowerNotifySpinLock = NIL_RTSPINLOCK;
+/** List of callbacks, in registration order. */
+static PRTPOWERNOTIFYREG volatile g_pRTPowerCallbackHead = NULL;
+/** The current done bit. */
+static uint32_t volatile g_iRTPowerDoneBit;
+/** The list generation.
+ * This is increased whenever the list has been modified. The callback routine
+ * makes use of this to avoid having to restart at the list head after each callback. */
+static uint32_t volatile g_iRTPowerGeneration;
+
+
+
+
+RTDECL(int) RTPowerSignalEvent(RTPOWEREVENT enmEvent)
+{
+ PRTPOWERNOTIFYREG pCur;
+ RTSPINLOCK hSpinlock;
+
+ /*
+ * This is a little bit tricky as we cannot be holding the spinlock
+ * while calling the callback. This means that the list might change
+ * while we're walking it, and that multiple events might be running
+ * concurrently (depending on the OS).
+ *
+ * So, the first measure is to employ a 32-bitmask for each
+ * record where we'll use a bit that rotates for each call to
+ * this function to indicate which records have been
+ * processed. This will take care of both changes to the list
+ * and a reasonable amount of concurrent events.
+ *
+ * In order to avoid having to restart the list walks for every
+ * callback we make, we'll make use of a list generation number that is
+ * incremented every time the list is changed. So, if it remains
+ * unchanged over a callback we can safely continue the iteration.
+ */
+ uint32_t iDone = ASMAtomicIncU32(&g_iRTPowerDoneBit);
+ iDone %= RT_SIZEOFMEMB(RTPOWERNOTIFYREG, bmDone) * 8;
+
+ hSpinlock = g_hRTPowerNotifySpinLock;
+ if (hSpinlock == NIL_RTSPINLOCK)
+ return VERR_ACCESS_DENIED;
+ RTSpinlockAcquire(hSpinlock);
+
+ /* Clear the bit. */
+ for (pCur = g_pRTPowerCallbackHead; pCur; pCur = pCur->pNext)
+ ASMAtomicBitClear(&pCur->bmDone[0], iDone);
+
+ /* Iterate the records and perform the callbacks. */
+ do
+ {
+ uint32_t const iGeneration = ASMAtomicUoReadU32(&g_iRTPowerGeneration);
+
+ pCur = g_pRTPowerCallbackHead;
+ while (pCur)
+ {
+ if (!ASMAtomicBitTestAndSet(&pCur->bmDone[0], iDone))
+ {
+ PFNRTPOWERNOTIFICATION pfnCallback = pCur->pfnCallback;
+ void *pvUser = pCur->pvUser;
+ pCur = pCur->pNext;
+ RTSpinlockRelease(g_hRTPowerNotifySpinLock);
+
+ pfnCallback(enmEvent, pvUser);
+
+ /* carefully reacquire the lock here, see RTR0MpNotificationTerm(). */
+ hSpinlock = g_hRTPowerNotifySpinLock;
+ if (hSpinlock == NIL_RTSPINLOCK)
+ return VERR_ACCESS_DENIED;
+ RTSpinlockAcquire(hSpinlock);
+ if (ASMAtomicUoReadU32(&g_iRTPowerGeneration) != iGeneration)
+ break;
+ }
+ else
+ pCur = pCur->pNext;
+ }
+ } while (pCur);
+
+ RTSpinlockRelease(hSpinlock);
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTPowerSignalEvent);
+
+
+RTDECL(int) RTPowerNotificationRegister(PFNRTPOWERNOTIFICATION pfnCallback, void *pvUser)
+{
+ PRTPOWERNOTIFYREG pCur;
+ PRTPOWERNOTIFYREG pNew;
+
+ /*
+ * Validation.
+ */
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertReturn(g_hRTPowerNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
+ RT_ASSERT_PREEMPTIBLE();
+
+ RTSpinlockAcquire(g_hRTPowerNotifySpinLock);
+ for (pCur = g_pRTPowerCallbackHead; pCur; pCur = pCur->pNext)
+ if ( pCur->pvUser == pvUser
+ && pCur->pfnCallback == pfnCallback)
+ break;
+ RTSpinlockRelease(g_hRTPowerNotifySpinLock);
+ AssertMsgReturn(!pCur, ("pCur=%p pfnCallback=%p pvUser=%p\n", pCur, pfnCallback, pvUser), VERR_ALREADY_EXISTS);
+
+ /*
+ * Allocate a new record and attempt to insert it.
+ */
+ pNew = (PRTPOWERNOTIFYREG)RTMemAlloc(sizeof(*pNew));
+ if (!pNew)
+ return VERR_NO_MEMORY;
+
+ pNew->pNext = NULL;
+ pNew->pfnCallback = pfnCallback;
+ pNew->pvUser = pvUser;
+ memset(&pNew->bmDone[0], 0xff, sizeof(pNew->bmDone));
+
+ RTSpinlockAcquire(g_hRTPowerNotifySpinLock);
+
+ pCur = g_pRTPowerCallbackHead;
+ if (!pCur)
+ g_pRTPowerCallbackHead = pNew;
+ else
+ {
+ for (pCur = g_pRTPowerCallbackHead; ; pCur = pCur->pNext)
+ if ( pCur->pvUser == pvUser
+ && pCur->pfnCallback == pfnCallback)
+ break;
+ else if (!pCur->pNext)
+ {
+ pCur->pNext = pNew;
+ pCur = NULL;
+ break;
+ }
+ }
+
+ ASMAtomicIncU32(&g_iRTPowerGeneration);
+
+ RTSpinlockRelease(g_hRTPowerNotifySpinLock);
+
+ /* duplicate? */
+ if (pCur)
+ {
+ RTMemFree(pCur);
+ AssertMsgFailedReturn(("pCur=%p pfnCallback=%p pvUser=%p\n", pCur, pfnCallback, pvUser), VERR_ALREADY_EXISTS);
+ }
+
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTPowerNotificationRegister);
+
+
+RTDECL(int) RTPowerNotificationDeregister(PFNRTPOWERNOTIFICATION pfnCallback, void *pvUser)
+{
+ PRTPOWERNOTIFYREG pPrev;
+ PRTPOWERNOTIFYREG pCur;
+
+ /*
+ * Validation.
+ */
+ AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+ AssertReturn(g_hRTPowerNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
+ RT_ASSERT_INTS_ON();
+
+ /*
+ * Find and unlink the record from the list.
+ */
+ RTSpinlockAcquire(g_hRTPowerNotifySpinLock);
+ pPrev = NULL;
+ for (pCur = g_pRTPowerCallbackHead; pCur; pCur = pCur->pNext)
+ {
+ if ( pCur->pvUser == pvUser
+ && pCur->pfnCallback == pfnCallback)
+ break;
+ pPrev = pCur;
+ }
+ if (pCur)
+ {
+ if (pPrev)
+ pPrev->pNext = pCur->pNext;
+ else
+ g_pRTPowerCallbackHead = pCur->pNext;
+ ASMAtomicIncU32(&g_iRTPowerGeneration);
+ }
+ RTSpinlockRelease(g_hRTPowerNotifySpinLock);
+
+ if (!pCur)
+ return VERR_NOT_FOUND;
+
+ /*
+ * Invalidate and free the record.
+ */
+ pCur->pNext = NULL;
+ pCur->pfnCallback = NULL;
+ RTMemFree(pCur);
+
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTPowerNotificationDeregister);
+
+
+DECLHIDDEN(int) rtR0PowerNotificationInit(void)
+{
+ int rc = RTSpinlockCreate((PRTSPINLOCK)&g_hRTPowerNotifySpinLock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0Power");
+ if (RT_SUCCESS(rc))
+ {
+ /** @todo OS specific init here */
+ return rc;
+#if 0
+ RTSpinlockDestroy(g_hRTPowerNotifySpinLock);
+ g_hRTPowerNotifySpinLock = NIL_RTSPINLOCK;
+#endif
+ }
+ return rc;
+}
+
+
+DECLHIDDEN(void) rtR0PowerNotificationTerm(void)
+{
+ PRTPOWERNOTIFYREG pHead;
+ RTSPINLOCK hSpinlock = g_hRTPowerNotifySpinLock;
+ AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);
+
+ /** @todo OS specific term here */
+
+ /* pick up the list and the spinlock. */
+ RTSpinlockAcquire(hSpinlock);
+ ASMAtomicWriteHandle(&g_hRTPowerNotifySpinLock, NIL_RTSPINLOCK);
+ pHead = g_pRTPowerCallbackHead;
+ g_pRTPowerCallbackHead = NULL;
+ ASMAtomicIncU32(&g_iRTPowerGeneration);
+ RTSpinlockRelease(hSpinlock);
+
+ /* free the list. */
+ while (pHead)
+ {
+ PRTPOWERNOTIFYREG pFree = pHead;
+ pHead = pHead->pNext;
+
+ pFree->pNext = NULL;
+ pFree->pfnCallback = NULL;
+ RTMemFree(pFree);
+ }
+
+ RTSpinlockDestroy(hSpinlock);
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/Makefile.kup b/src/VBox/Runtime/r0drv/solaris/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/Makefile.kup
diff --git a/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c
new file mode 100644
index 00000000..4bcdc883
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c
@@ -0,0 +1,66 @@
+/* $Id: RTLogWriteDebugger-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Log To Debugger, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/log.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+
+
+
+RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
+{
+ if (pch[cb] != '\0')
+ AssertBreakpoint();
+
+ /* cmn_err() acquires adaptive mutexes. Not preemption safe, see @bugref{6657}. */
+ if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+ return;
+
+ if ( !g_frtSolSplSetsEIF
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ || ASMIntAreEnabled()
+#else
+/* PORTME: Check if interrupts are enabled, if applicable. */
+#endif
+ )
+ {
+ cmn_err(CE_CONT, pch);
+ }
+
+ return;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c
new file mode 100644
index 00000000..d2245742
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/RTMpPokeCpu-r0drv-solaris.c
@@ -0,0 +1,50 @@
+/* $Id: RTMpPokeCpu-r0drv-solaris.c $ */
+/** @file
+ * IPRT - RTMpPokeCpu, Solaris Implementation.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mp.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+
+
+
+RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
+{
+ RT_ASSERT_INTS_ON();
+ if (idCpu < ncpus)
+ poke_cpu(idCpu);
+ return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c
new file mode 100644
index 00000000..37d33ab5
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/alloc-r0drv-solaris.c
@@ -0,0 +1,206 @@
+/* $Id: alloc-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Memory Allocation, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/thread.h>
+#include "r0drv/alloc-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+static ddi_dma_attr_t s_rtR0SolDmaAttr =
+{
+ DMA_ATTR_V0, /* Version Number */
+ (uint64_t)0, /* Lower limit */
+ (uint64_t)0, /* High limit */
+ (uint64_t)0xffffffff, /* Counter limit */
+ (uint64_t)PAGESIZE, /* Alignment */
+ (uint64_t)PAGESIZE, /* Burst size */
+ (uint64_t)PAGESIZE, /* Effective DMA size */
+ (uint64_t)0xffffffff, /* Max DMA xfer size */
+ (uint64_t)0xffffffff, /* Segment boundary */
+ 1, /* Scatter-gather list length (1 for contiguous) */
+ 1, /* Device granularity */
+ 0 /* Bus-specific flags */
+};
+
+extern void *contig_alloc(size_t cb, ddi_dma_attr_t *pDmaAttr, size_t uAlign, int fCanSleep);
+
+
+/**
+ * OS specific allocation function.
+ */
+DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
+{
+ size_t cbAllocated = cb;
+ PRTMEMHDR pHdr;
+
+#ifdef RT_ARCH_AMD64
+ if (fFlags & RTMEMHDR_FLAG_EXEC)
+ {
+ AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), VERR_NOT_SUPPORTED);
+ cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
+ pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
+ }
+ else
+#endif
+ {
+ unsigned fKmFlags = fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC ? KM_NOSLEEP : KM_SLEEP;
+ if (fFlags & RTMEMHDR_FLAG_ZEROED)
+ pHdr = (PRTMEMHDR)kmem_zalloc(cb + sizeof(*pHdr), fKmFlags);
+ else
+ pHdr = (PRTMEMHDR)kmem_alloc(cb + sizeof(*pHdr), fKmFlags);
+ }
+ if (RT_UNLIKELY(!pHdr))
+ {
+ LogRel(("rtMemAllocEx(%u, %#x) failed\n", (unsigned)cb + sizeof(*pHdr), fFlags));
+ return VERR_NO_MEMORY;
+ }
+
+ pHdr->u32Magic = RTMEMHDR_MAGIC;
+ pHdr->fFlags = fFlags;
+ pHdr->cb = cbAllocated;
+ pHdr->cbReq = cb;
+
+ *ppHdr = pHdr;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * OS specific free function.
+ */
+DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
+{
+ pHdr->u32Magic += 1;
+#ifdef RT_ARCH_AMD64
+ if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
+ segkmem_free(heaptext_arena, pHdr, pHdr->cb + sizeof(*pHdr));
+ else
+#endif
+ kmem_free(pHdr, pHdr->cb + sizeof(*pHdr));
+}
+
+
+/**
+ * Allocates physical memory which satisfy the given constraints.
+ *
+ * @param uPhysHi The upper physical address limit (inclusive).
+ * @param puPhys Where to store the physical address of the allocated
+ * memory. Optional, can be NULL.
+ * @param cb Size of allocation.
+ * @param uAlignment Alignment.
+ * @param fContig Whether the memory must be physically contiguous or
+ * not.
+ *
+ * @returns Virtual address of allocated memory block or NULL if allocation
+ * failed.
+ */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb, uint64_t uAlignment, bool fContig)
+{
+ if ((cb & PAGEOFFSET) != 0)
+ return NULL;
+
+ size_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
+ if (!cPages)
+ return NULL;
+
+ ddi_dma_attr_t DmaAttr = s_rtR0SolDmaAttr;
+ DmaAttr.dma_attr_addr_hi = uPhysHi;
+ DmaAttr.dma_attr_align = uAlignment;
+ if (!fContig)
+ DmaAttr.dma_attr_sgllen = cPages > INT_MAX ? INT_MAX - 1 : cPages;
+ else
+ AssertRelease(DmaAttr.dma_attr_sgllen == 1);
+
+ void *pvMem = contig_alloc(cb, &DmaAttr, PAGESIZE, 1 /* can sleep */);
+ if (!pvMem)
+ {
+ LogRel(("rtR0SolMemAlloc failed. cb=%u Align=%u fContig=%d\n", (unsigned)cb, (unsigned)uAlignment, fContig));
+ return NULL;
+ }
+
+ pfn_t PageFrameNum = hat_getpfnum(kas.a_hat, (caddr_t)pvMem);
+ AssertRelease(PageFrameNum != PFN_INVALID);
+ if (puPhys)
+ *puPhys = (uint64_t)PageFrameNum << PAGESHIFT;
+
+ return pvMem;
+}
+
+
+/**
+ * Frees memory allocated using rtR0SolMemAlloc().
+ *
+ * @param pv The memory to free.
+ * @param cb Size of the memory block
+ */
+DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb)
+{
+ if (RT_LIKELY(pv))
+ g_pfnrtR0Sol_contig_free(pv, cb);
+}
+
+
+RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
+{
+ AssertPtrReturn(pPhys, NULL);
+ AssertReturn(cb > 0, NULL);
+ RT_ASSERT_PREEMPTIBLE();
+
+ /* Allocate physically contiguous (< 4GB) page-aligned memory. */
+ uint64_t uPhys;
+ void *pvMem = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, cb, PAGESIZE, true /* fContig */);
+ if (RT_UNLIKELY(!pvMem))
+ {
+ LogRel(("RTMemContAlloc failed to allocate %u bytes\n", cb));
+ return NULL;
+ }
+
+ Assert(uPhys < _4G);
+ *pPhys = uPhys;
+ return pvMem;
+}
+
+
+RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
+{
+ RT_ASSERT_PREEMPTIBLE();
+ rtR0SolMemFree(pv, cb);
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c
new file mode 100644
index 00000000..bcd19ae0
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/assert-r0drv-solaris.c
@@ -0,0 +1,77 @@
+/* $Id: assert-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Assertion Workers, Ring-0 Drivers, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/assert.h>
+
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include <iprt/stdarg.h>
+#include <iprt/string.h>
+
+#include "internal/assert.h"
+
+
+DECLHIDDEN(void) rtR0AssertNativeMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
+{
+ uprintf("\r\n!!Assertion Failed!!\r\n"
+ "Expression: %s\r\n"
+ "Location : %s(%d) %s\r\n",
+ pszExpr, pszFile, uLine, pszFunction);
+}
+
+
+DECLHIDDEN(void) rtR0AssertNativeMsg2V(bool fInitial, const char *pszFormat, va_list va)
+{
+ char szMsg[256];
+
+ RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, va);
+ szMsg[sizeof(szMsg) - 1] = '\0';
+ uprintf("%s", szMsg);
+
+ NOREF(fInitial);
+}
+
+
+RTR0DECL(void) RTR0AssertPanicSystem(void)
+{
+ const char *psz = &g_szRTAssertMsg2[0];
+ const char *pszEnd = &g_szRTAssertMsg2[sizeof(g_szRTAssertMsg2)];
+ while (psz < pszEnd && (*psz == ' ' || *psz == '\t' || *psz == '\n' || *psz == '\r'))
+ psz++;
+
+ if (psz < pszEnd && *psz)
+ assfail(psz, g_pszRTAssertFile, g_u32RTAssertLine);
+ else
+ assfail(g_szRTAssertMsg1, g_pszRTAssertFile, g_u32RTAssertLine);
+ g_szRTAssertMsg2[0] = '\0';
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c
new file mode 100644
index 00000000..34fd359b
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/dbgkrnlinfo-r0drv-solaris.c
@@ -0,0 +1,339 @@
+/* $Id: dbgkrnlinfo-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Kernel debug information, Ring-0 Driver, Solaris Code.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/dbg.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Solaris kernel debug info instance data.
+ */
+typedef struct RTDBGKRNLINFOINT
+{
+ /** Magic value (RTDBGKRNLINFO_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The number of threads referencing this object. */
+ uint32_t volatile cRefs;
+ /** Pointer to the genunix CTF handle. */
+ ctf_file_t *pGenUnixCTF;
+ /** Pointer to the genunix module handle. */
+ modctl_t *pGenUnixMod;
+} RTDBGKRNLINFOINT;
+/** Pointer to the solaris kernel debug info instance data. */
+typedef struct RTDBGKRNLINFOINT *PRTDBGKRNLINFOINT;
+
+
+/**
+ * Retains a kernel module and opens the CTF data associated with it.
+ *
+ * @param pszModule The name of the module to open.
+ * @param ppMod Where to store the module handle.
+ * @param ppCTF Where to store the module's CTF handle.
+ *
+ * @return IPRT status code.
+ */
+static int rtR0DbgKrnlInfoModRetain(char *pszModule, modctl_t **ppMod, ctf_file_t **ppCTF)
+{
+ AssertPtrReturn(pszModule, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppMod, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppCTF, VERR_INVALID_PARAMETER);
+
+ int rc = VINF_SUCCESS;
+ modid_t ModId = mod_name_to_modid(pszModule);
+ if (ModId != -1)
+ {
+ *ppMod = mod_hold_by_id(ModId);
+ if (*ppMod)
+ {
+ /*
+ * Hold mod_lock as ctf_modopen may update the module with uncompressed CTF data.
+ */
+ int err;
+ mutex_enter(&mod_lock);
+ *ppCTF = ctf_modopen(((modctl_t *)*ppMod)->mod_mp, &err);
+ mutex_exit(&mod_lock);
+ mod_release_mod(*ppMod);
+
+ if (*ppCTF)
+ return VINF_SUCCESS;
+ else
+ {
+ LogRel(("rtR0DbgKrnlInfoModRetain: ctf_modopen failed for '%s' err=%d\n", pszModule, err));
+ rc = VERR_INTERNAL_ERROR_3;
+ }
+ }
+ else
+ {
+ LogRel(("rtR0DbgKrnlInfoModRetain: mod_hold_by_id failed for '%s'\n", pszModule));
+ rc = VERR_INTERNAL_ERROR_2;
+ }
+ }
+ else
+ {
+ LogRel(("rtR0DbgKrnlInfoModRetain: mod_name_to_modid failed for '%s'\n", pszModule));
+ rc = VERR_INTERNAL_ERROR;
+ }
+
+ return rc;
+}
+
+
+/**
+ * Releases the kernel module and closes its CTF data.
+ *
+ * @param pMod Pointer to the module handle.
+ * @param pCTF Pointer to the module's CTF handle.
+ */
+static void rtR0DbgKrnlInfoModRelease(modctl_t *pMod, ctf_file_t *pCTF)
+{
+ AssertPtrReturnVoid(pMod);
+ AssertPtrReturnVoid(pCTF);
+
+ ctf_close(pCTF);
+}
+
+
+/**
+ * Helper for opening the specified kernel module.
+ *
+ * @param pszModule The name of the module.
+ * @param ppMod Where to store the module handle.
+ * @param ppCtf Where to store the module's CTF handle.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0DbgKrnlInfoModRetainEx(const char *pszModule, modctl_t **ppMod, ctf_file_t **ppCtf)
+{
+ char *pszMod = RTStrDup(pszModule);
+ if (RT_LIKELY(pszMod))
+ {
+ int rc = rtR0DbgKrnlInfoModRetain(pszMod, ppMod, ppCtf);
+ RTStrFree(pszMod);
+ if (RT_SUCCESS(rc))
+ {
+ AssertPtrReturn(*ppMod, VERR_INTERNAL_ERROR_2);
+ AssertPtrReturn(*ppCtf, VERR_INTERNAL_ERROR_3);
+ }
+ return rc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoOpen(PRTDBGKRNLINFO phKrnlInfo, uint32_t fFlags)
+{
+ AssertReturn(fFlags == 0, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(phKrnlInfo, VERR_INVALID_POINTER);
+ /* This can be called as part of IPRT init, in which case we have no thread preempt information yet. */
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ *phKrnlInfo = NIL_RTDBGKRNLINFO;
+ PRTDBGKRNLINFOINT pThis = (PRTDBGKRNLINFOINT)RTMemAllocZ(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ char szGenUnixModName[] = "genunix";
+ int rc = rtR0DbgKrnlInfoModRetain(szGenUnixModName, &pThis->pGenUnixMod, &pThis->pGenUnixCTF);
+ if (RT_SUCCESS(rc))
+ {
+ pThis->u32Magic = RTDBGKRNLINFO_MAGIC;
+ pThis->cRefs = 1;
+
+ *phKrnlInfo = pThis;
+ return VINF_SUCCESS;
+ }
+
+ LogRel(("RTR0DbgKrnlInfoOpen: rtR0DbgKrnlInfoModRetain failed rc=%d.\n", rc));
+ RTMemFree(pThis);
+ return rc;
+}
+
+
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRetain(RTDBGKRNLINFO hKrnlInfo)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, UINT32_MAX);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000);
+ return cRefs;
+}
+
+
+RTR0DECL(uint32_t) RTR0DbgKrnlInfoRelease(RTDBGKRNLINFO hKrnlInfo)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ if (pThis == NIL_RTDBGKRNLINFO)
+ return 0;
+ AssertPtrReturn(pThis, UINT32_MAX);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+ if (cRefs == 0)
+ {
+ pThis->u32Magic = ~RTDBGKRNLINFO_MAGIC;
+ rtR0DbgKrnlInfoModRelease(pThis->pGenUnixMod, pThis->pGenUnixCTF);
+ RTMemFree(pThis);
+ }
+ return cRefs;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoQueryMember(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszStructure,
+ const char *pszMember, size_t *poffMember)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertPtrReturn(pszMember, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszStructure, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(poffMember, VERR_INVALID_PARAMETER);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ ctf_file_t *pCtf = NULL;
+ modctl_t *pMod = NULL;
+ if (!pszModule)
+ {
+ pCtf = pThis->pGenUnixCTF;
+ pMod = pThis->pGenUnixMod;
+ }
+ else
+ {
+ int rc2 = rtR0DbgKrnlInfoModRetainEx(pszModule, &pMod, &pCtf);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ Assert(pMod);
+ Assert(pCtf);
+ }
+
+ int rc = VERR_NOT_FOUND;
+ ctf_id_t TypeIdent = ctf_lookup_by_name(pCtf, pszStructure);
+ if (TypeIdent != CTF_ERR)
+ {
+ ctf_membinfo_t MemberInfo;
+ RT_ZERO(MemberInfo);
+ if (ctf_member_info(pCtf, TypeIdent, pszMember, &MemberInfo) != CTF_ERR)
+ {
+ *poffMember = (MemberInfo.ctm_offset >> 3);
+ rc = VINF_SUCCESS;
+ }
+ }
+
+ if (pszModule)
+ rtR0DbgKrnlInfoModRelease(pMod, pCtf);
+ return rc;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySymbol(RTDBGKRNLINFO hKrnlInfo, const char *pszModule,
+ const char *pszSymbol, void **ppvSymbol)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertPtrReturn(pszSymbol, VERR_INVALID_PARAMETER);
+ AssertPtrNullReturn(ppvSymbol, VERR_INVALID_PARAMETER);
+ AssertReturn(!pszModule, VERR_MODULE_NOT_FOUND);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ uintptr_t uValue = kobj_getsymvalue((char *)pszSymbol, 1 /* only kernel */);
+ if (ppvSymbol)
+ *ppvSymbol = (void *)uValue;
+ if (uValue)
+ return VINF_SUCCESS;
+ return VERR_SYMBOL_NOT_FOUND;
+}
+
+
+RTR0DECL(int) RTR0DbgKrnlInfoQuerySize(RTDBGKRNLINFO hKrnlInfo, const char *pszModule, const char *pszType, size_t *pcbType)
+{
+ PRTDBGKRNLINFOINT pThis = hKrnlInfo;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+ AssertPtrReturn(pszType, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pcbType, VERR_INVALID_PARAMETER);
+ if (g_frtSolInitDone)
+ RT_ASSERT_PREEMPTIBLE();
+
+ modctl_t *pMod = NULL;
+ ctf_file_t *pCtf = NULL;
+ if (!pszModule)
+ {
+ pCtf = pThis->pGenUnixCTF;
+ pMod = pThis->pGenUnixMod;
+ }
+ else
+ {
+ int rc2 = rtR0DbgKrnlInfoModRetainEx(pszModule, &pMod, &pCtf);
+ if (RT_FAILURE(rc2))
+ return rc2;
+ Assert(pMod);
+ Assert(pCtf);
+ }
+
+ int rc = VERR_NOT_FOUND;
+ ctf_id_t TypeIdent = ctf_lookup_by_name(pCtf, pszType);
+ if (TypeIdent != CTF_ERR)
+ {
+ ssize_t cbType = ctf_type_size(pCtf, TypeIdent);
+ if (cbType > 0)
+ {
+ *pcbType = cbType;
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_WRONG_TYPE;
+ }
+
+ if (pszModule)
+ rtR0DbgKrnlInfoModRelease(pMod, pCtf);
+ return rc;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
new file mode 100644
index 00000000..e6dfc565
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
@@ -0,0 +1,282 @@
+/* $Id: initterm-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Initialization & Termination, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include "internal/initterm.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Kernel debug info handle. */
+RTDBGKRNLINFO g_hKrnlDbgInfo;
+/** Indicates that the spl routines (and therefore a bunch of other ones too)
+ * will set EFLAGS::IF and break code that disables interrupts. */
+bool g_frtSolSplSetsEIF = false;
+/** timeout_generic address. */
+PFNSOL_timeout_generic g_pfnrtR0Sol_timeout_generic = NULL;
+/** untimeout_generic address. */
+PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic = NULL;
+/** cyclic_reprogram address. */
+PFNSOL_cyclic_reprogram g_pfnrtR0Sol_cyclic_reprogram = NULL;
+/** page_noreloc_supported address. */
+PFNSOL_page_noreloc_supported g_pfnrtR0Sol_page_noreloc_supported = NULL;
+/** Whether to use the kernel page freelist. */
+bool g_frtSolUseKflt = false;
+/** Whether we've completed R0 initialization. */
+bool g_frtSolInitDone = false;
+/** Whether to use old-style xc_call interface. */
+bool g_frtSolOldIPI = false;
+/** Whether to use old-style xc_call interface using one ulong_t as the CPU set
+ * representation. */
+bool g_frtSolOldIPIUlong = false;
+/** The xc_call callout table structure. */
+RTR0FNSOLXCCALL g_rtSolXcCall;
+/** Whether to use the old-style installctx()/removectx() routines. */
+bool g_frtSolOldThreadCtx = false;
+/** The thread-context hooks callout table structure. */
+RTR0FNSOLTHREADCTX g_rtSolThreadCtx;
+/** Thread preemption offset in the thread structure. */
+size_t g_offrtSolThreadPreempt;
+/** Thread ID offset in the thread structure. */
+size_t g_offrtSolThreadId;
+/** The interrupt (pinned) thread pointer offset in the thread structure. */
+size_t g_offrtSolThreadIntrThread;
+/** The dispatcher lock pointer offset in the thread structure. */
+size_t g_offrtSolThreadLock;
+/** The process pointer offset in the thread structure. */
+size_t g_offrtSolThreadProc;
+/** Host scheduler preemption offset. */
+size_t g_offrtSolCpuPreempt;
+/** Host scheduler force preemption offset. */
+size_t g_offrtSolCpuForceKernelPreempt;
+/* contig_free is only weakly bound: on kernels that do not export it the binding
+ * is NULL and rtR0InitNative() resolves it via the kernel debug info instead.
+ * (Remove if no longer relevant for supported S10 versions.) */
+extern void contig_free(void *addr, size_t size);
+#pragma weak contig_free
+/** contig_free address. */
+PFNSOL_contig_free g_pfnrtR0Sol_contig_free = contig_free;
+
+/**
+ * Native, Solaris-specific ring-0 initialization.
+ *
+ * Opens the kernel debug info (CTF) and resolves the structure member offsets
+ * and kernel symbols the rest of the Solaris r0drv code relies on. Diagnostics
+ * go through Solaris' cmn_err() because IPRT logging is not yet available.
+ *
+ * @returns IPRT status code.
+ */
+DECLHIDDEN(int) rtR0InitNative(void)
+{
+ /*
+ * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
+ */
+ int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
+ if (RT_SUCCESS(rc))
+ {
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ /*
+ * Detect whether spl*() is preserving the interrupt flag or not.
+ * This is a problem on S10.
+ */
+ RTCCUINTREG uOldFlags = ASMIntDisableFlags();
+ int iOld = splr(DISP_LEVEL);
+ if (ASMIntAreEnabled())
+ g_frtSolSplSetsEIF = true;
+ splx(iOld);
+ if (ASMIntAreEnabled())
+ g_frtSolSplSetsEIF = true;
+ ASMSetFlags(uOldFlags);
+#else
+ /* PORTME: See if the amd64/x86 problem applies to this architecture. */
+#endif
+ /*
+ * Mandatory: Preemption offsets.
+ */
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_did", &g_offrtSolThreadId);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_did!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_intr", &g_offrtSolThreadIntrThread);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_intr!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_lockp", &g_offrtSolThreadLock);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_lockp!\n");
+ goto errorbail;
+ }
+
+ rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_procp", &g_offrtSolThreadProc);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "Failed to find kthread_t::t_procp!\n");
+ goto errorbail;
+ }
+ cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n", g_offrtSolCpuPreempt, g_offrtSolCpuPreempt);
+ cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n", g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt);
+ cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt);
+ cmn_err(CE_CONT, "!kthread_t::t_did @ 0x%lx (%ld)\n", g_offrtSolThreadId, g_offrtSolThreadId);
+ cmn_err(CE_CONT, "!kthread_t::t_intr @ 0x%lx (%ld)\n", g_offrtSolThreadIntrThread, g_offrtSolThreadIntrThread);
+ cmn_err(CE_CONT, "!kthread_t::t_lockp @ 0x%lx (%ld)\n", g_offrtSolThreadLock, g_offrtSolThreadLock);
+ cmn_err(CE_CONT, "!kthread_t::t_procp @ 0x%lx (%ld)\n", g_offrtSolThreadProc, g_offrtSolThreadProc);
+
+ /*
+ * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details.
+ */
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
+ if (RT_SUCCESS(rc))
+ {
+ if (ncpus > IPRT_SOL_NCPUS)
+ {
+ cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
+ rc = VERR_NOT_SUPPORTED;
+ goto errorbail;
+ }
+ g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
+ }
+ else
+ {
+ /* Presence of xc_init_cpu distinguishes the new xc_call ABI from the old one. */
+ g_frtSolOldIPI = true;
+ g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
+ if (max_cpuid + 1 == sizeof(ulong_t) * 8)
+ {
+ g_frtSolOldIPIUlong = true;
+ g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
+ }
+ else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
+ {
+ cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid,
+ IPRT_SOL_NCPUS);
+ rc = VERR_NOT_SUPPORTED;
+ goto errorbail;
+ }
+ }
+
+ /*
+ * Mandatory: Thread-context hooks.
+ */
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx", NULL /* ppvSymbol */);
+ if (RT_SUCCESS(rc))
+ {
+ g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx;
+ g_rtSolThreadCtx.Remove.pfnSol_removectx = (void *)removectx;
+ }
+ else
+ {
+ /* Presence of exitctx distinguishes the new installctx()/removectx() ABI from the old one. */
+ g_frtSolOldThreadCtx = true;
+ g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx;
+ g_rtSolThreadCtx.Remove.pfnSol_removectx_old = (void *)removectx;
+ }
+
+ /*
+ * Optional: Timeout hooks.
+ */
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",
+ (void **)&g_pfnrtR0Sol_timeout_generic);
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic",
+ (void **)&g_pfnrtR0Sol_untimeout_generic);
+ if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
+ {
+ /* Only use the pair if both halves resolved; otherwise fall back to neither. */
+ static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" };
+ bool iMissingFn = g_pfnrtR0Sol_timeout_generic == NULL;
+ cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]);
+ g_pfnrtR0Sol_timeout_generic = NULL;
+ g_pfnrtR0Sol_untimeout_generic = NULL;
+ }
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram",
+ (void **)&g_pfnrtR0Sol_cyclic_reprogram);
+
+ /*
+ * Optional: Querying page no-relocation support.
+ */
+ RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported",
+ (void **)&g_pfnrtR0Sol_page_noreloc_supported);
+
+ /*
+ * Weak binding failures: contig_free
+ */
+ if (g_pfnrtR0Sol_contig_free == NULL)
+ {
+ rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free",
+ (void **)&g_pfnrtR0Sol_contig_free);
+ if (RT_FAILURE(rc))
+ {
+ cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
+ goto errorbail;
+ }
+ }
+
+ g_frtSolInitDone = true;
+ return VINF_SUCCESS;
+ }
+ else
+ {
+ cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
+ return rc;
+ }
+
+/* Failure exit: drop the kernel debug info reference acquired above. */
+errorbail:
+ RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+ return rc;
+}
+
+
+/**
+ * Native, Solaris-specific ring-0 termination; undoes rtR0InitNative().
+ */
+DECLHIDDEN(void) rtR0TermNative(void)
+{
+ RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+ g_frtSolInitDone = false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
new file mode 100644
index 00000000..84813208
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
@@ -0,0 +1,1166 @@
+/* $Id: memobj-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/memobj.h>
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
+#include <iprt/process.h>
+#include "internal/memobj.h"
+#include "memobj-r0drv-solaris.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define SOL_IS_KRNL_ADDR(vx) ((uintptr_t)(vx) >= kernelbase)
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The Solaris version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJSOL
+{
+ /** The core structure. */
+ RTR0MEMOBJINTERNAL Core;
+ /** Pointer to kernel memory cookie. */
+ ddi_umem_cookie_t Cookie;
+ /** Shadow locked pages. */
+ void *pvHandle;
+ /** Access during locking. */
+ int fAccess;
+ /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
+ * allocation. */
+ bool fLargePage;
+ /** Whether we have individual pages or a kernel-mapped virtual memory block in
+ * an RTR0MEMOBJTYPE_PHYS_NC allocation. */
+ bool fIndivPages;
+} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Dummy vnode used to name (hash) individually allocated _4K pages. */
+static vnode_t g_PageVnode;
+/** Protects g_offPage. */
+static kmutex_t g_OffsetMtx;
+/** Running offset giving each page a unique (vnode, offset) identity. */
+static u_offset_t g_offPage;
+
+/** Dummy vnode used to name (hash) large page allocations. */
+static vnode_t g_LargePageVnode;
+/** Protects g_offLargePage. */
+static kmutex_t g_LargePageOffsetMtx;
+/** Running offset giving each large page a unique (vnode, offset) identity. */
+static u_offset_t g_offLargePage;
+/** Whether PG_NORELOC is used for large pages (detected on first allocation). */
+static bool g_fLargePageNoReloc;
+
+
+/**
+ * Returns the physical address for a virtual address.
+ *
+ * @param pv The virtual address.
+ *
+ * @returns The physical address corresponding to @a pv.
+ */
+static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
+{
+ struct hat *pHat = NULL;
+ pfn_t PageFrameNum = 0;
+ uintptr_t uVirtAddr = (uintptr_t)pv;
+
+ /* Translate through the kernel HAT for kernel addresses, otherwise through
+ the current process' HAT. */
+ if (SOL_IS_KRNL_ADDR(pv))
+ pHat = kas.a_hat;
+ else
+ {
+ proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+ AssertRelease(pProcess);
+ pHat = pProcess->p_as->a_hat;
+ }
+
+ /* NOTE(review): relies on Solaris' PAGEMASK clearing the sub-page bits here;
+ the page offset is OR'ed back in below using IPRT's PAGE_OFFSET_MASK. */
+ PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
+ AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
+ return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
+}
+
+
+/**
+ * Returns the physical address for a page.
+ *
+ * @param pPage Pointer to the page.
+ *
+ * @returns The physical address for a page.
+ */
+static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
+{
+ AssertPtr(pPage);
+ pfn_t PageFrameNum = page_pptonum(pPage);
+ /* Bug fix: the format string had a %p but the pPage argument was missing
+ (cf. the correct message in rtR0MemObjSolVirtToPhys). */
+ AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
+ return (uint64_t)PageFrameNum << PAGE_SHIFT;
+}
+
+
+/**
+ * Allocates one page.
+ *
+ * @param virtAddr The virtual address to which this page maybe mapped in
+ * the future.
+ *
+ * @returns Pointer to the allocated page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
+{
+ u_offset_t offPage;
+ seg_t KernelSeg;
+
+ /*
+ * 16777215 terabytes of total memory for all VMs or
+ * restart 8000 1GB VMs 2147483 times until wraparound!
+ */
+ mutex_enter(&g_OffsetMtx);
+ AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
+ g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
+ offPage = g_offPage;
+ mutex_exit(&g_OffsetMtx);
+
+ /* 'virtAddr' is only a colouring hint here (see rtR0MemObjSolPagesAlloc);
+ the (g_PageVnode, offPage) pair names the page for later page_lookup(). */
+ KernelSeg.s_as = &kas;
+ page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
+ if (RT_LIKELY(pPage))
+ {
+ /*
+ * Lock this page into memory "long term" to prevent this page from being paged out
+ * when we drop the page lock temporarily (during free). Downgrade to a shared lock
+ * to prevent page relocation.
+ */
+ page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
+ page_io_unlock(pPage);
+ page_downgrade(pPage);
+ Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
+ }
+
+ return pPage;
+}
+
+
+/**
+ * Destroys an allocated page.
+ *
+ * @param pPage Pointer to the page to be destroyed.
+ * @remarks This function expects page in @c pPage to be shared locked.
+ */
+static void rtR0MemObjSolPageDestroy(page_t *pPage)
+{
+ /*
+ * We need to exclusive lock the pages before freeing them, if upgrading the shared lock to exclusive fails,
+ * drop the page lock and look it up from the hash. Record the page offset before we drop the page lock as
+ * we cannot touch any page_t members once the lock is dropped.
+ */
+ AssertPtr(pPage);
+ Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
+
+ u_offset_t offPage = pPage->p_offset;
+ int rc = page_tryupgrade(pPage);
+ if (!rc)
+ {
+ page_unlock(pPage);
+ page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);
+
+ /*
+ * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
+ */
+ AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
+ &g_PageVnode, offPage, pFoundPage, pPage));
+ }
+ Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
+ /* Undo the "long term" lock taken in rtR0MemObjSolPageAlloc before destroying the page. */
+ page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
+ page_destroy(pPage, 0 /* move it to the free list */);
+}
+
+
+/* Currently not used on 32-bits, define it to shut up gcc. */
+#if HC_ARCH_BITS == 64
+/**
+ * Allocates physical, non-contiguous memory of pages.
+ *
+ * @param puPhys Where to store the physical address of first page. Optional,
+ * can be NULL.
+ * @param cb The size of the allocation.
+ *
+ * @return Array of allocated pages, NULL on failure.
+ */
+static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
+{
+ /*
+ * VM1:
+ * The page freelist and cachelist both hold pages that are not mapped into any address space.
+ * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
+ * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
+ *
+ * VM2:
+ * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
+ */
+
+ /*
+ * Non-pageable memory reservation request for _4K pages, don't sleep.
+ */
+ size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int rc = page_resv(cPages, KM_NOSLEEP);
+ /* page_resv() returns non-zero on success. */
+ if (rc)
+ {
+ size_t cbPages = cPages * sizeof(page_t *);
+ page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
+ if (RT_LIKELY(ppPages))
+ {
+ /*
+ * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
+ * we don't yet have the 'virtAddr' to which this memory may be mapped.
+ */
+ caddr_t virtAddr = 0;
+ for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
+ {
+ /*
+ * Get a page from the free list locked exclusively. The page will be named (hashed in)
+ * and we rely on it during free. The page we get will be shared locked to prevent the page
+ * from being relocated.
+ */
+ page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
+ if (RT_UNLIKELY(!pPage))
+ {
+ /*
+ * No page found, release whatever pages we grabbed so far.
+ */
+ for (size_t k = 0; k < i; k++)
+ rtR0MemObjSolPageDestroy(ppPages[k]);
+ kmem_free(ppPages, cbPages);
+ page_unresv(cPages);
+ return NULL;
+ }
+
+ ppPages[i] = pPage;
+ }
+
+ if (puPhys)
+ *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
+ return ppPages;
+ }
+
+ page_unresv(cPages);
+ }
+
+ return NULL;
+}
+#endif /* HC_ARCH_BITS == 64 */
+
+
+/**
+ * Frees pages allocated by rtR0MemObjSolPagesAlloc().
+ *
+ * @param ppPages Pointer to the page list.
+ * @param cb Size of the allocation in bytes (not the size of the
+ * page list).
+ */
+static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
+{
+ size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t cbPages = cPages * sizeof(page_t *);
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ rtR0MemObjSolPageDestroy(ppPages[iPage]);
+
+ kmem_free(ppPages, cbPages);
+ page_unresv(cPages);
+}
+
+
+/**
+ * Allocates one large page.
+ *
+ * @param puPhys Where to store the physical address of the allocated
+ * page. Optional, can be NULL.
+ * @param cbLargePage Size of the large page.
+ *
+ * @returns Pointer to a list of pages that cover the large page, NULL on
+ * failure.
+ */
+static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
+{
+ /*
+ * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
+ * fragementation on systems that support it.
+ */
+ /* NOTE(review): this one-time detection is not serialized; concurrent first
+ calls could both run it. Presumably harmless since the result is
+ deterministic for a given page size -- confirm. */
+ static bool fPageNoRelocChecked = false;
+ if (fPageNoRelocChecked == false)
+ {
+ fPageNoRelocChecked = true;
+ g_fLargePageNoReloc = false;
+ if ( g_pfnrtR0Sol_page_noreloc_supported
+ && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
+ {
+ g_fLargePageNoReloc = true;
+ }
+ }
+
+ /*
+ * Non-pageable memory reservation request for _4K pages, don't sleep.
+ */
+ size_t cPages = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t cbPages = cPages * sizeof(page_t *);
+ u_offset_t offPage = 0;
+ int rc = page_resv(cPages, KM_NOSLEEP);
+ if (rc)
+ {
+ page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
+ if (RT_LIKELY(ppPages))
+ {
+ /* Grab a unique (vnode, offset) identity for this large page. */
+ mutex_enter(&g_LargePageOffsetMtx);
+ AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
+ g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
+ offPage = g_offLargePage;
+ mutex_exit(&g_LargePageOffsetMtx);
+
+ seg_t KernelSeg;
+ KernelSeg.s_as = &kas;
+ page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
+ PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
+ 0 /* vaddr */,NULL /* locality group */);
+ if (pRootPage)
+ {
+ /*
+ * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
+ */
+ page_t *pPageList = pRootPage;
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ page_t *pPage = pPageList;
+ AssertPtr(pPage);
+ AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+ ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+ AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
+ (int)pPage->p_szc, (int)pRootPage->p_szc));
+
+ /*
+ * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
+ * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
+ * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
+ * page_resv().
+ */
+ page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
+
+ page_sub(&pPageList, pPage);
+ page_io_unlock(pPage);
+ page_downgrade(pPage);
+ Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
+
+ ppPages[iPage] = pPage;
+ }
+ Assert(pPageList == NULL);
+ Assert(ppPages[0] == pRootPage);
+
+ uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
+ AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
+ if (puPhys)
+ *puPhys = uPhys;
+ return ppPages;
+ }
+
+ /*
+ * Don't restore offPrev in case of failure (race condition), we have plenty of offset space.
+ * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
+ */
+ kmem_free(ppPages, cbPages);
+ }
+
+ page_unresv(cPages);
+ }
+ return NULL;
+}
+
+
+/**
+ * Frees the large page.
+ *
+ * @param ppPages Pointer to the list of small pages that cover the
+ * large page.
+ * @param cbLargePage Size of the allocation (i.e. size of the large
+ * page).
+ */
+static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
+{
+ Assert(ppPages);
+ Assert(cbLargePage > PAGE_SIZE);
+
+ bool fDemoted = false;
+ size_t cPages = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t cbPages = cPages * sizeof(page_t *);
+ page_t *pPageList = ppPages[0];
+
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ /*
+ * We need the pages exclusively locked, try upgrading the shared lock.
+ * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
+ * and lookup the page from the page hash locking it exclusively.
+ */
+ page_t *pPage = ppPages[iPage];
+ u_offset_t offPage = pPage->p_offset;
+ int rc = page_tryupgrade(pPage);
+ if (!rc)
+ {
+ page_unlock(pPage);
+ page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
+ AssertRelease(pFoundPage);
+
+ if (g_fLargePageNoReloc)
+ {
+ /*
+ * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
+ */
+ AssertReleaseMsg(pFoundPage == pPage,
+ ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
+ pFoundPage, pPage));
+ }
+
+ /*
+ * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
+ * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
+ */
+ if (page_get_pagecnt(pFoundPage->p_szc) == 1) /* Base size of only _4K associated with this page. */
+ fDemoted = true;
+ pPage = pFoundPage;
+ ppPages[iPage] = pFoundPage;
+ }
+ Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
+ page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
+ }
+
+ /* If any page got demoted to _4K pages, free the pages one by one; otherwise
+ the whole run can be destroyed in a single call. */
+ if (fDemoted)
+ {
+ for (size_t iPage = 0; iPage < cPages; iPage++)
+ {
+ Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
+ page_destroy(ppPages[iPage], 0 /* move it to the free list */);
+ }
+ }
+ else
+ {
+ /*
+ * Although we shred the adjacent pages in the linked list, page_destroy_pages works on
+ * adjacent pages via array increments. So this does indeed free all the pages.
+ */
+ AssertPtr(pPageList);
+ page_destroy_pages(pPageList);
+ }
+ kmem_free(ppPages, cbPages);
+ page_unresv(cPages);
+}
+
+
+/**
+ * Unmaps kernel/user-space mapped memory.
+ *
+ * @param pv Pointer to the mapped memory block.
+ * @param cb Size of the memory block.
+ */
+static void rtR0MemObjSolUnmap(void *pv, size_t cb)
+{
+ if (!SOL_IS_KRNL_ADDR(pv))
+ {
+ /* User mapping: remove the range while holding the address space range lock. */
+ struct as *pAs = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+ AssertPtr(pAs);
+ as_rangelock(pAs);
+ as_unmap(pAs, pv, cb);
+ as_rangeunlock(pAs);
+ }
+ else
+ {
+ /* Kernel mapping: drop the HAT translations, then return the VA range to the heap arena. */
+ hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
+ vmem_free(heap_arena, pv, cb);
+ }
+}
+
+
+/**
+ * Lock down memory mappings for a virtual address.
+ *
+ * @param pv Pointer to the memory to lock down.
+ * @param cb Size of the memory block.
+ * @param fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
+{
+ /*
+ * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
+ */
+ if (!SOL_IS_KRNL_ADDR(pv))
+ {
+ proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
+ AssertPtr(pProc);
+ faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
+ if (rc)
+ {
+ /* Fixed format string: the old 'pv=%pv' printed a stray 'v' after the pointer. */
+ LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
+ return VERR_LOCK_FAILED;
+ }
+ }
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Unlock memory mappings for a virtual address.
+ *
+ * @param pv Pointer to the locked memory.
+ * @param cb Size of the memory block.
+ * @param fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
+ */
+static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
+{
+ /* Kernel mappings are permanently locked; only user-space ranges need the soft unlock. */
+ if (SOL_IS_KRNL_ADDR(pv))
+ return;
+
+ proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
+ AssertPtr(pProc);
+ as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
+}
+
+
+/**
+ * Maps a list of physical pages into user address space.
+ *
+ * @param pVirtAddr Where to store the virtual address of the mapping.
+ * @param fPageAccess Page access rights (PROT_READ, PROT_WRITE,
+ * PROT_EXEC)
+ * @param paPhysAddrs Array of physical addresses to pages.
+ * @param cb Size of memory being mapped.
+ * @param cbPageSize The page size (in bytes) used for the mapping.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
+{
+ struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+ int rc = VERR_INTERNAL_ERROR;
+ SEGVBOX_CRARGS Args;
+
+ Args.paPhysAddrs = paPhysAddrs;
+ Args.fPageAccess = fPageAccess;
+ Args.cbPageSize = cbPageSize;
+
+ as_rangelock(pAddrSpace);
+ /* Let map_addr() pick a user virtual address, then create the mapping via the
+ rtR0SegVBoxSolCreate segment driver (see memobj-r0drv-solaris.h). */
+ map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
+ if (*pVirtAddr != NULL)
+ rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
+ else
+ rc = ENOMEM;
+ as_rangeunlock(pAddrSpace);
+
+ /* 'rc' holds an errno value at this point (0 on success). */
+ return RTErrConvertFromErrno(rc);
+}
+
+
+/**
+ * Frees the native (Solaris) backing of a memory object, dispatching on the
+ * object type that was set when it was created.
+ *
+ * @returns IPRT status code.
+ * @param pMem The memory object to free.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+{
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
+
+ switch (pMemSolaris->Core.enmType)
+ {
+ case RTR0MEMOBJTYPE_LOW:
+ rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS:
+ if (pMemSolaris->Core.u.Phys.fAllocated)
+ {
+ /* fLargePage tells us which allocator produced the backing. */
+ if (pMemSolaris->fLargePage)
+ rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+ else
+ rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ }
+ break;
+
+ case RTR0MEMOBJTYPE_PHYS_NC:
+ /* fIndivPages: individual page list vs. contig_alloc'ed block. */
+ if (pMemSolaris->fIndivPages)
+ rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+ else
+ rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ break;
+
+ case RTR0MEMOBJTYPE_PAGE:
+ ddi_umem_free(pMemSolaris->Cookie);
+ break;
+
+ case RTR0MEMOBJTYPE_LOCK:
+ rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
+ break;
+
+ case RTR0MEMOBJTYPE_MAPPING:
+ rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ break;
+
+ case RTR0MEMOBJTYPE_RES_VIRT:
+ {
+ if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
+ vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+ else
+ AssertFailed();
+ break;
+ }
+
+ case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
+ default:
+ AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
+ return VERR_INTERNAL_ERROR;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates page aligned kernel memory via ddi_umem_alloc().
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the new memory object.
+ * @param cb Number of bytes to allocate.
+ * @param fExecutable Ignored on Solaris.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable); /* Consistent with rtR0MemObjNativeAllocLow; silences unused-parameter warnings. */
+
+ /* Create the object. */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
+ if (RT_UNLIKELY(!pMemSolaris))
+ return VERR_NO_MEMORY;
+
+ /* The cookie is kept for ddi_umem_free() in rtR0MemObjNativeFree(). */
+ void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
+ if (RT_UNLIKELY(!pvMem))
+ {
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_PAGE_MEMORY;
+ }
+
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->pvHandle = NULL;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates page aligned memory with a physical backing below 4GB.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the new memory object.
+ * @param cb Number of bytes to allocate.
+ * @param fExecutable Ignored.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+
+ /* Create the object */
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
+ if (!pMemSolaris)
+ return VERR_NO_MEMORY;
+
+ /* Allocate physically low page-aligned memory. */
+ uint64_t uPhysHi = _4G - 1;
+ void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
+ if (RT_UNLIKELY(!pvMem))
+ {
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_LOW_MEMORY;
+ }
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->pvHandle = NULL;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocates physically contiguous memory below 4GB by delegating to
+ * rtR0MemObjNativeAllocPhys(); the resulting object therefore has type PHYS,
+ * not CONT (see the RTR0MEMOBJTYPE_CONT case in rtR0MemObjNativeFree).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+ NOREF(fExecutable);
+ return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
+}
+
+
+/**
+ * Allocates non-contiguous physical memory, optionally constrained to be below
+ * a physical address limit.
+ *
+ * @returns IPRT status code.
+ * @param ppMem Where to store the new memory object.
+ * @param cb Number of bytes to allocate.
+ * @param PhysHighest Highest acceptable physical address, NIL_RTHCPHYS for
+ * no restriction.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+#if HC_ARCH_BITS == 64
+ PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
+ if (RT_UNLIKELY(!pMemSolaris))
+ return VERR_NO_MEMORY;
+
+ if (PhysHighest == NIL_RTHCPHYS)
+ {
+ uint64_t PhysAddr = UINT64_MAX;
+ void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
+ if (!pvPages)
+ {
+ /* %zu: cb is a size_t, the former %u specifier was wrong on LP64. */
+ LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%zu.\n", cb));
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_MEMORY;
+ }
+ Assert(PhysAddr != UINT64_MAX);
+ Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+
+ pMemSolaris->Core.pv = NULL;
+ pMemSolaris->pvHandle = pvPages;
+ pMemSolaris->fIndivPages = true;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+ else
+ {
+ /*
+ * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
+ * We fall back to using contig_alloc().
+ */
+ uint64_t PhysAddr = UINT64_MAX;
+ void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
+ if (!pvMem)
+ {
+ /* %zu: cb is a size_t, the former %u specifier was wrong on LP64. */
+ LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%zu PhysHighest=%RHp.\n", cb, PhysHighest));
+ rtR0MemObjDelete(&pMemSolaris->Core);
+ return VERR_NO_MEMORY;
+ }
+ Assert(PhysAddr != UINT64_MAX);
+ Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+
+ pMemSolaris->Core.pv = pvMem;
+ pMemSolaris->pvHandle = NULL;
+ pMemSolaris->fIndivPages = false;
+ *ppMem = &pMemSolaris->Core;
+ return VINF_SUCCESS;
+ }
+
+#else /* 32 bit: */
+ return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
+#endif
+}
+
+
+/**
+ * Allocates physically contiguous memory (RTR0MEMOBJTYPE_PHYS).
+ *
+ * A request that exactly matches the host's large-page size (and alignment,
+ * with no address constraint) is served by a dedicated large-page allocator;
+ * everything else goes through rtR0SolMemAlloc() with fContig set.
+ *
+ * @param   ppMem        Where to store the new memory object on success.
+ * @param   cb           Number of bytes to allocate.
+ * @param   PhysHighest  Highest acceptable physical address (>= 16MB required).
+ * @param   uAlignment   Requested physical alignment of the block.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
+
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
+        return VERR_NO_MEMORY;
+
+    /*
+     * Allocating one large page gets special treatment.
+     */
+    /* Lazily determine the large page size once; 0 means "no large pages". */
+    static uint32_t s_cbLargePage = UINT32_MAX;
+    if (s_cbLargePage == UINT32_MAX)
+    {
+        if (page_num_pagesizes() > 1)
+            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
+        else
+            ASMAtomicWriteU32(&s_cbLargePage, 0);
+    }
+
+    uint64_t PhysAddr;
+    if (   cb == s_cbLargePage
+        && cb == uAlignment
+        && PhysHighest == NIL_RTHCPHYS)
+    {
+        /*
+         * Allocate one large page (backed by physically contiguous memory).
+         */
+        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
+        if (RT_LIKELY(pvPages))
+        {
+            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
+            pMemSolaris->Core.pv                = NULL;    /* no kernel mapping of the large page */
+            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
+            pMemSolaris->Core.u.Phys.fAllocated = true;
+            pMemSolaris->pvHandle               = pvPages;
+            pMemSolaris->fLargePage             = true;
+
+            *ppMem = &pMemSolaris->Core;
+            return VINF_SUCCESS;
+        }
+    }
+    else
+    {
+        /*
+         * Allocate physically contiguous memory aligned as specified.
+         */
+        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
+        PhysAddr = PhysHighest;
+        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
+        if (RT_LIKELY(pvMem))
+        {
+            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+            Assert(PhysAddr < PhysHighest);
+            Assert(PhysAddr + cb <= PhysHighest);
+
+            pMemSolaris->Core.pv                = pvMem;
+            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
+            pMemSolaris->Core.u.Phys.fAllocated = true;
+            pMemSolaris->pvHandle               = NULL;
+            pMemSolaris->fLargePage             = false;
+
+            *ppMem = &pMemSolaris->Core;
+            return VINF_SUCCESS;
+        }
+    }
+    rtR0MemObjDelete(&pMemSolaris->Core);
+    return VERR_NO_CONT_MEMORY;
+}
+
+
+/**
+ * Creates a memory object for a pre-existing physical address range.
+ * No memory is allocated; the range must be mapped before it can be used.
+ *
+ * @param   ppMem         Where to store the new memory object on success.
+ * @param   Phys          Base physical address of the range.
+ * @param   cb            Size of the range in bytes.
+ * @param   uCachePolicy  Must be RTMEM_CACHE_POLICY_DONT_CARE (only policy supported).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
+{
+    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
+
+    /* Create the object. */
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (!pMemSolaris)
+        return VERR_NO_MEMORY;
+
+    /* There is no allocation here, it needs to be mapped somewhere first. */
+    pMemSolaris->Core.u.Phys.fAllocated   = false;
+    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
+    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
+    *ppMem = &pMemSolaris->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Locks down a range of user (ring-3) memory belonging to the calling process.
+ *
+ * @param   ppMem      Where to store the new lock object on success.
+ * @param   R3Ptr      User address of the range to lock.
+ * @param   cb         Size of the range in bytes.
+ * @param   fAccess    RTMEM_PROT_XXX access the caller intends.
+ * @param   R0Process  Must be the current process (RTR0ProcHandleSelf()).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
+                                         RTR0PROCESS R0Process)
+{
+    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
+    NOREF(fAccess);
+
+    /* Create the locking object */
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    if (!pMemSolaris)
+        return VERR_NO_MEMORY;
+
+    /* Lock down user pages. */
+    /* NOTE(review): a single S_READ/S_WRITE/S_EXEC mode is selected (assignments,
+       not OR'ed) -- presumably what rtR0MemObjSolLock expects; confirm there. */
+    int fPageAccess = S_READ;
+    if (fAccess & RTMEM_PROT_WRITE)
+        fPageAccess = S_WRITE;
+    if (fAccess & RTMEM_PROT_EXEC)
+        fPageAccess = S_EXEC;
+    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
+    if (RT_FAILURE(rc))
+    {
+        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
+        rtR0MemObjDelete(&pMemSolaris->Core);
+        return rc;
+    }
+
+    /* Fill in the object attributes and return successfully. */
+    pMemSolaris->Core.u.Lock.R0Process = R0Process;
+    pMemSolaris->pvHandle              = NULL;
+    pMemSolaris->fAccess               = fPageAccess;   /* kept for the matching unlock */
+    *ppMem = &pMemSolaris->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Locks down a range of kernel memory; mirrors rtR0MemObjNativeLockUser but
+ * with a NIL_RTR0PROCESS owner.
+ *
+ * @param   ppMem    Where to store the new lock object on success.
+ * @param   pv       Kernel address of the range to lock.
+ * @param   cb       Size of the range in bytes.
+ * @param   fAccess  RTMEM_PROT_XXX access the caller intends.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
+{
+    NOREF(fAccess);
+
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
+    if (!pMemSolaris)
+        return VERR_NO_MEMORY;
+
+    /* Lock down kernel pages. */
+    /* Single access mode selected, same convention as the user-lock path. */
+    int fPageAccess = S_READ;
+    if (fAccess & RTMEM_PROT_WRITE)
+        fPageAccess = S_WRITE;
+    if (fAccess & RTMEM_PROT_EXEC)
+        fPageAccess = S_EXEC;
+    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
+    if (RT_FAILURE(rc))
+    {
+        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
+        rtR0MemObjDelete(&pMemSolaris->Core);
+        return rc;
+    }
+
+    /* Fill in the object attributes and return successfully. */
+    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+    pMemSolaris->pvHandle              = NULL;
+    pMemSolaris->fAccess               = fPageAccess;
+    *ppMem = &pMemSolaris->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Reserves kernel virtual address space (no backing memory) from heap_arena.
+ *
+ * @param   ppMem       Where to store the new reservation object on success.
+ * @param   pvFixed     Ignored here; address is chosen by vmem_xalloc().
+ * @param   cb          Number of bytes of address space to reserve.
+ * @param   uAlignment  Required alignment of the reserved range.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
+{
+    PRTR0MEMOBJSOL pMemSolaris;
+
+    /*
+     * Use xalloc.
+     */
+    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
+                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
+    if (RT_UNLIKELY(!pv))
+        return VERR_NO_MEMORY;
+
+    /* Create the object. */
+    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
+    if (!pMemSolaris)
+    {
+        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
+        vmem_xfree(heap_arena, pv, cb);     /* release the address space again */
+        return VERR_NO_MEMORY;
+    }
+
+    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+    *ppMem = &pMemSolaris->Core;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Reserving user-mode address space is not implemented on Solaris.
+ * @returns VERR_NOT_SUPPORTED unconditionally.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
+                                            RTR0PROCESS R0Process)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Maps an existing memory object into kernel address space.
+ *
+ * Reserves address space from heap_arena via vmem_xalloc() and then installs
+ * locked translations for every page of the source object with hat_devload().
+ *
+ * @param   ppMem       Where to store the new mapping object on success.
+ * @param   pMemToMap   The object whose pages are to be mapped.
+ * @param   pvFixed     Must be (void *)-1; fixed addresses are not supported.
+ * @param   uAlignment  Requested alignment, at most PAGE_SIZE.
+ * @param   fProt       RTMEM_PROT_XXX protection for the new mapping.
+ * @param   offSub      Byte offset into pMemToMap where the mapping starts.
+ * @param   cbSub       Size of the mapping; 0 means the whole object.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+                                          unsigned fProt, size_t offSub, size_t cbSub)
+{
+    /* Fail if requested to do something we can't. */
+    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
+    if (uAlignment > PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Use xalloc to get address space.
+     */
+    if (!cbSub)
+        cbSub = pMemToMap->cb;
+    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
+                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
+    if (RT_UNLIKELY(!pv))
+        return VERR_MAP_FAILED;
+
+    /*
+     * Load the pages from the other object into it.
+     */
+    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
+    if (fProt & RTMEM_PROT_READ)
+        fAttr |= PROT_READ;
+    if (fProt & RTMEM_PROT_EXEC)
+        fAttr |= PROT_EXEC;
+    if (fProt & RTMEM_PROT_WRITE)
+        fAttr |= PROT_WRITE;
+    fAttr |= HAT_NOSYNC;
+
+    int    rc  = VINF_SUCCESS;
+    size_t off = 0;
+    while (off < cbSub)
+    {
+        /* Bugfix: look up the page at the current cursor (offSub + off); the
+           original used (offSub + offSub) which mapped the same source page
+           into every destination page of the mapping. */
+        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
+        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
+        pfn_t pfn = HCPhys >> PAGE_SHIFT;
+        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);
+
+        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);
+
+        /* Advance. */
+        off += PAGE_SIZE;
+    }
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Create a memory object for the mapping.
+         */
+        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
+        if (pMemSolaris)
+        {
+            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+            *ppMem = &pMemSolaris->Core;
+            return VINF_SUCCESS;
+        }
+
+        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
+        rc = VERR_NO_MEMORY;
+    }
+
+    /* Failure: tear down whatever translations were installed and free the space. */
+    if (off)
+        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
+    vmem_xfree(heap_arena, pv, cbSub);
+    return rc;
+}
+
+
+/**
+ * Maps a memory object into the calling process's user address space.
+ *
+ * Builds an array of per-page physical addresses (source-type dependent) and
+ * hands it to rtR0MemObjSolUserMap() which installs the user mapping.
+ *
+ * @param   ppMem       Where to store the new mapping object on success.
+ * @param   pMemToMap   The object to map.
+ * @param   R3PtrFixed  Must be (RTR3PTR)-1; fixed addresses not supported.
+ * @param   uAlignment  Must be PAGE_SIZE.
+ * @param   fProt       RTMEM_PROT_XXX protection for the mapping.
+ * @param   R0Process   Must be the current process (RTR0ProcHandleSelf()).
+ */
+DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
+                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
+{
+    /*
+     * Fend off things we cannot do.
+     */
+    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
+    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
+    if (uAlignment != PAGE_SIZE)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Get parameters from the source object.
+     */
+    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
+    void          *pv               = pMemToMapSolaris->Core.pv;
+    size_t         cb               = pMemToMapSolaris->Core.cb;
+    size_t         cPages           = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;  /* round up to whole pages */
+
+    /*
+     * Create the mapping object
+     */
+    PRTR0MEMOBJSOL pMemSolaris;
+    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
+        return VERR_NO_MEMORY;
+
+    int rc = VINF_SUCCESS;
+    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
+    if (RT_LIKELY(paPhysAddrs))
+    {
+        /*
+         * Prepare the pages for mapping according to type.
+         */
+        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
+            && pMemToMapSolaris->fIndivPages)
+        {
+            /* Individually allocated pages: read each page's physical address. */
+            page_t **ppPages = pMemToMapSolaris->pvHandle;
+            AssertPtr(ppPages);
+            for (size_t iPage = 0; iPage < cPages; iPage++)
+                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(ppPages[iPage]);
+        }
+        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
+                 && pMemToMapSolaris->fLargePage)
+        {
+            /* One large contiguous page: addresses follow from the base. */
+            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
+            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
+                paPhysAddrs[iPage] = Phys;
+        }
+        else
+        {
+            /*
+             * Have kernel mapping, just translate virtual to physical.
+             */
+            AssertPtr(pv);
+            rc = VINF_SUCCESS;
+            for (size_t iPage = 0; iPage < cPages; iPage++)
+            {
+                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
+                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
+                {
+                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
+                    rc = VERR_MAP_FAILED;
+                    break;
+                }
+                pv = (void *)((uintptr_t)pv + PAGE_SIZE);
+            }
+        }
+        if (RT_SUCCESS(rc))
+        {
+            unsigned fPageAccess = PROT_READ;
+            if (fProt & RTMEM_PROT_WRITE)
+                fPageAccess |= PROT_WRITE;
+            if (fProt & RTMEM_PROT_EXEC)
+                fPageAccess |= PROT_EXEC;
+
+            /*
+             * Perform the actual mapping.
+             */
+            caddr_t UserAddr = NULL;
+            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
+            if (RT_SUCCESS(rc))
+            {
+                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
+                pMemSolaris->Core.pv                  = UserAddr;
+
+                *ppMem = &pMemSolaris->Core;
+                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
+                return VINF_SUCCESS;
+            }
+
+            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
+        }
+
+        /* NOTE(review): the more specific rc from above is overwritten here;
+           callers only see VERR_MAP_FAILED (the log line keeps the detail). */
+        rc = VERR_MAP_FAILED;
+        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
+    }
+    else
+        rc = VERR_NO_MEMORY;
+    rtR0MemObjDelete(&pMemSolaris->Core);
+    return rc;
+}
+
+
+/**
+ * Changing protection on an existing mapping is not implemented on Solaris.
+ * @returns VERR_NOT_SUPPORTED unconditionally.
+ */
+DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+{
+    NOREF(pMem);
+    NOREF(offSub);
+    NOREF(cbSub);
+    NOREF(fProt);
+    return VERR_NOT_SUPPORTED;
+}
+
+
+/**
+ * Returns the physical address of page @a iPage within the object, or
+ * NIL_RTHCPHYS when the type cannot (or should not) be resolved here.
+ */
+DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
+
+    switch (pMemSolaris->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            /* Contiguous fallback allocations have a kernel mapping; translate it.
+               Individually allocated pages are resolved from the page list. */
+            if (   pMemSolaris->Core.u.Phys.fAllocated
+                || !pMemSolaris->fIndivPages)
+            {
+                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
+                return rtR0MemObjSolVirtToPhys(pb);
+            }
+            page_t **ppPages = pMemSolaris->pvHandle;
+            return rtR0MemObjSolPagePhys(ppPages[iPage]);
+
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_LOCK:
+        {
+            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
+            return rtR0MemObjSolVirtToPhys(pb);
+        }
+
+        /*
+         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
+         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
+         */
+        case RTR0MEMOBJTYPE_MAPPING:
+            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);
+
+        case RTR0MEMOBJTYPE_CONT:
+        case RTR0MEMOBJTYPE_PHYS:
+            AssertFailed(); /* handled by the caller */
+            /* fallthrough - returns NIL after the assertion in release builds */
+        case RTR0MEMOBJTYPE_RES_VIRT:
+        default:
+            return NIL_RTHCPHYS;
+    }
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h
new file mode 100644
index 00000000..95b91a39
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.h
@@ -0,0 +1,324 @@
+/* $Id: memobj-r0drv-solaris.h $ */
+/** @file
+ * IPRT - Ring-0 Memory Objects - Segment driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2012-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_solaris_memobj_r0drv_solaris_h
+#define IPRT_INCLUDED_SRC_r0drv_solaris_memobj_r0drv_solaris_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/*******************************************************************************
+* Header Files *
+*******************************************************************************/
+#include "the-solaris-kernel.h"
+
+
+/*******************************************************************************
+* Structures and Typedefs *
+*******************************************************************************/
+/** Creation arguments passed to rtR0SegVBoxSolCreate() via as_map(). */
+typedef struct SEGVBOX_CRARGS
+{
+    uint64_t *paPhysAddrs;   /**< Per-page physical addresses to map. */
+    size_t    cbPageSize;    /**< Page size used for the mapping. */
+    uint_t    fPageAccess;   /**< PROT_XXX access bits for the pages. */
+} SEGVBOX_CRARGS;
+typedef SEGVBOX_CRARGS *PSEGVBOX_CRARGS;
+
+/** Per-segment private data hung off seg_t::s_data. */
+typedef struct SEGVBOX_DATA
+{
+    uint_t fPageAccess;      /**< PROT_XXX access bits (incl. PROT_USER). */
+    size_t cbPageSize;       /**< Page size of this segment's mappings. */
+} SEGVBOX_DATA;
+typedef SEGVBOX_DATA *PSEGVBOX_DATA;
+
+/* Forward declared ops table (defined at the bottom of this file). */
+static struct seg_ops s_SegVBoxOps;
+/* Dummy vnode handed out by rtR0SegVBoxSolGetVp(). */
+static vnode_t s_segVBoxVnode;
+
+
+/**
+ * Segment-create callback: sets up private data and loads locked HAT
+ * translations for every page described by the creation arguments.
+ *
+ * @param   pSeg    The segment being created (base/size already set).
+ * @param   pvArgs  Pointer to a SEGVBOX_CRARGS describing the mapping.
+ * @returns 0 on success (release-asserts on unsupported page sizes).
+ */
+DECLINLINE(int) rtR0SegVBoxSolCreate(seg_t *pSeg, void *pvArgs)
+{
+    struct as      *pAddrSpace = pSeg->s_as;
+    PSEGVBOX_CRARGS pArgs      = pvArgs;
+    PSEGVBOX_DATA   pData      = kmem_zalloc(sizeof(*pData), KM_SLEEP);
+
+    AssertPtr(pAddrSpace);
+    AssertPtr(pArgs);
+    AssertPtr(pData);
+
+    /*
+     * Currently we only map _4K pages but this segment driver can handle any size
+     * supported by the Solaris HAT layer.
+     */
+    size_t cbPageSize = pArgs->cbPageSize;
+    size_t uPageShift = 0;
+    switch (cbPageSize)
+    {
+        case _4K: uPageShift = 12; break;
+        case _2M: uPageShift = 21; break;
+        default:  AssertReleaseMsgFailed(("Unsupported page size for mapping cbPageSize=%llx\n", cbPageSize)); break;
+    }
+
+    hat_map(pAddrSpace->a_hat, pSeg->s_base, pSeg->s_size, HAT_MAP);
+    pData->fPageAccess = pArgs->fPageAccess | PROT_USER;   /* user mappings always get PROT_USER */
+    pData->cbPageSize  = cbPageSize;
+
+    pSeg->s_ops  = &s_SegVBoxOps;
+    pSeg->s_data = pData;
+
+    /*
+     * Now load and lock down the mappings to the physical addresses.
+     */
+    caddr_t virtAddr = pSeg->s_base;
+    pgcnt_t cPages   = (pSeg->s_size + cbPageSize - 1) >> uPageShift;
+    for (pgcnt_t iPage = 0; iPage < cPages; ++iPage, virtAddr += cbPageSize)
+    {
+        /* paPhysAddrs entries are page aligned, so >> uPageShift yields the pfn. */
+        hat_devload(pAddrSpace->a_hat, virtAddr, cbPageSize, pArgs->paPhysAddrs[iPage] >> uPageShift,
+                    pData->fPageAccess | HAT_UNORDERED_OK, HAT_LOAD_LOCK);
+    }
+
+    return 0;
+}
+
+
+/**
+ * Segment-dup callback: copies the private data into the destination segment.
+ * (The HAT translations themselves are not re-established here.)
+ */
+static int rtR0SegVBoxSolDup(seg_t *pSrcSeg, seg_t *pDstSeg)
+{
+    /*
+     * Duplicate a segment and return the new segment in 'pDstSeg'.
+     */
+    PSEGVBOX_DATA pSrcData = pSrcSeg->s_data;
+    PSEGVBOX_DATA pDstData = kmem_zalloc(sizeof(*pDstData), KM_SLEEP);
+
+    AssertPtr(pDstData);
+    AssertPtr(pSrcData);
+
+    pDstData->fPageAccess = pSrcData->fPageAccess;
+    pDstData->cbPageSize  = pSrcData->cbPageSize;
+    pDstSeg->s_ops        = &s_SegVBoxOps;
+    pDstSeg->s_data       = pDstData;
+
+    return 0;
+}
+
+
+/**
+ * Segment-unmap callback: only whole-segment unmaps are supported; unloads
+ * the locked translations and frees the segment.
+ *
+ * @returns 0 on success, ENOTSUP for partial unmaps.
+ */
+static int rtR0SegVBoxSolUnmap(seg_t *pSeg, caddr_t virtAddr, size_t cb)
+{
+    PSEGVBOX_DATA pData = pSeg->s_data;
+
+    AssertRelease(pData);
+    AssertReleaseMsg(virtAddr >= pSeg->s_base, ("virtAddr=%p s_base=%p\n", virtAddr, pSeg->s_base));
+    AssertReleaseMsg(virtAddr + cb <= pSeg->s_base + pSeg->s_size, ("virtAddr=%p cb=%llu s_base=%p s_size=%llu\n", virtAddr,
+                                                                   cb, pSeg->s_base, pSeg->s_size));
+    /* Both the range and its start must be page aligned. */
+    size_t cbPageOffset = pData->cbPageSize - 1;
+    AssertRelease(!(cb & cbPageOffset));
+    AssertRelease(!((uintptr_t)virtAddr & cbPageOffset));
+
+    if (   virtAddr != pSeg->s_base
+        || cb != pSeg->s_size)
+    {
+        return ENOTSUP;
+    }
+
+    hat_unload(pSeg->s_as->a_hat, virtAddr, cb, HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);
+
+    seg_free(pSeg);
+    return 0;
+}
+
+
+/** Segment-free callback: releases the private data. */
+static void rtR0SegVBoxSolFree(seg_t *pSeg)
+{
+    PSEGVBOX_DATA pData = pSeg->s_data;
+    kmem_free(pData, sizeof(*pData));
+}
+
+
+/** Fault callback: everything is pre-loaded and locked, so report success. */
+static int rtR0SegVBoxSolFault(struct hat *pHat, seg_t *pSeg, caddr_t virtAddr, size_t cb, enum fault_type FaultType,
+                               enum seg_rw ReadWrite)
+{
+    /*
+     * We would demand fault if the (u)read() path would SEGOP_FAULT() on buffers mapped in via our
+     * segment driver i.e. prefaults before DMA. Don't fail in such case where we're called directly,
+     * see @bugref{5047}.
+     */
+    return 0;
+}
+
+
+/** Async-fault callback: nothing to do, mappings are pre-loaded. */
+static int rtR0SegVBoxSolFaultA(seg_t *pSeg, caddr_t virtAddr)
+{
+    return 0;
+}
+
+
+/** setprot callback: protection changes are refused. */
+static int rtR0SegVBoxSolSetProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t fPageAccess)
+{
+    return EACCES;
+}
+
+
+/** checkprot callback: no protection queries supported. */
+static int rtR0SegVBoxSolCheckProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t fPageAccess)
+{
+    return EINVAL;
+}
+
+
+/** kluster callback: -1 disallows read-ahead clustering. */
+static int rtR0SegVBoxSolKluster(seg_t *pSeg, caddr_t virtAddr, ssize_t Delta)
+{
+    return -1;
+}
+
+
+/** sync callback: nothing to flush for locked device mappings. */
+static int rtR0SegVBoxSolSync(seg_t *pSeg, caddr_t virtAddr, size_t cb, int Attr, uint_t fFlags)
+{
+    return 0;
+}
+
+
+/** incore callback: marks every page as resident in pVec.
+ *  NOTE(review): returns 0 (cbLen is counted down to zero by the loop) --
+ *  matches what callers appear to accept here; confirm against seg_ops spec. */
+static size_t rtR0SegVBoxSolInCore(seg_t *pSeg, caddr_t virtAddr, size_t cb, char *pVec)
+{
+    PSEGVBOX_DATA pData = pSeg->s_data;
+    AssertRelease(pData);
+    size_t uPageOffset = pData->cbPageSize - 1;
+    size_t uPageMask   = ~uPageOffset;
+    size_t cbLen       = (cb + uPageOffset) & uPageMask;   /* round cb up to whole pages */
+    for (virtAddr = 0; cbLen != 0; cbLen -= pData->cbPageSize, virtAddr += pData->cbPageSize)
+        *pVec++ = 1;
+    return cbLen;
+}
+
+
+/** lockop callback: pages are permanently locked, nothing to do. */
+static int rtR0SegVBoxSolLockOp(seg_t *pSeg, caddr_t virtAddr, size_t cb, int Attr, int Op, ulong_t *pLockMap, size_t off)
+{
+    return 0;
+}
+
+
+/**
+ * getprot callback: reports the segment-wide access bits for every page in
+ * the requested range (all pages share the same protection).
+ */
+static int rtR0SegVBoxSolGetProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t *pafPageAccess)
+{
+    PSEGVBOX_DATA pData = pSeg->s_data;
+    /* Number of pages covered by [virtAddr, virtAddr + cb]. */
+    size_t iPage = seg_page(pSeg, virtAddr + cb) - seg_page(pSeg, virtAddr) + 1;
+    if (iPage)
+    {
+        do
+        {
+            iPage--;
+            pafPageAccess[iPage] = pData->fPageAccess;
+        } while (iPage);
+    }
+    return 0;
+}
+
+
+/** getoffset callback: offset of virtAddr from the segment base. */
+static u_offset_t rtR0SegVBoxSolGetOffset(seg_t *pSeg, caddr_t virtAddr)
+{
+    return ((uintptr_t)virtAddr - (uintptr_t)pSeg->s_base);
+}
+
+
+/** gettype callback: these mappings behave as shared. */
+static int rtR0SegVBoxSolGetType(seg_t *pSeg, caddr_t virtAddr)
+{
+    return MAP_SHARED;
+}
+
+
+/** getvp callback: hands out the file-scope dummy vnode. */
+static int rtR0SegVBoxSolGetVp(seg_t *pSeg, caddr_t virtAddr, vnode_t **ppVnode)
+{
+    *ppVnode = &s_segVBoxVnode;
+    return 0;
+}
+
+
+/** advise callback: madvise-style hints are ignored. */
+static int rtR0SegVBoxSolAdvise(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t Behav /* advice hint, unused */)
+{
+    return 0;
+}
+
+
+/** dump callback: nothing to contribute to crash dumps. */
+static void rtR0SegVBoxSolDump(seg_t *pSeg)
+{
+    /* Nothing to do. */
+}
+
+
+/** pagelock callback: explicit page locking is not supported. */
+static int rtR0SegVBoxSolPageLock(seg_t *pSeg, caddr_t virtAddr, size_t cb, page_t ***pppPage, enum lock_type LockType, enum seg_rw ReadWrite)
+{
+    return ENOTSUP;
+}
+
+
+/** setpagesize callback: page size is fixed at creation time. */
+static int rtR0SegVBoxSolSetPageSize(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t SizeCode)
+{
+    return ENOTSUP;
+}
+
+
+/** getmemid callback: no memory IDs for these mappings. */
+static int rtR0SegVBoxSolGetMemId(seg_t *pSeg, caddr_t virtAddr, memid_t *pMemId)
+{
+    return ENODEV;
+}
+
+
+/** getpolicy callback: no lgroup memory placement policy. */
+static lgrp_mem_policy_info_t *rtR0SegVBoxSolGetPolicy(seg_t *pSeg, caddr_t virtAddr)
+{
+    return NULL;
+}
+
+
+/** capable callback: no optional segment capabilities advertised. */
+static int rtR0SegVBoxSolCapable(seg_t *pSeg, segcapability_t Capab)
+{
+    return 0;
+}
+
+
+/** Ops table wiring this segment driver into the Solaris VM layer.
+ *  Entry order must match struct seg_ops in the Solaris headers. */
+static struct seg_ops s_SegVBoxOps =
+{
+    rtR0SegVBoxSolDup,
+    rtR0SegVBoxSolUnmap,
+    rtR0SegVBoxSolFree,
+    rtR0SegVBoxSolFault,
+    rtR0SegVBoxSolFaultA,
+    rtR0SegVBoxSolSetProt,
+    rtR0SegVBoxSolCheckProt,
+    rtR0SegVBoxSolKluster,
+    NULL, /* swapout */
+    rtR0SegVBoxSolSync,
+    rtR0SegVBoxSolInCore,
+    rtR0SegVBoxSolLockOp,
+    rtR0SegVBoxSolGetProt,
+    rtR0SegVBoxSolGetOffset,
+    rtR0SegVBoxSolGetType,
+    rtR0SegVBoxSolGetVp,
+    rtR0SegVBoxSolAdvise,
+    rtR0SegVBoxSolDump,
+    rtR0SegVBoxSolPageLock,
+    rtR0SegVBoxSolSetPageSize,
+    rtR0SegVBoxSolGetMemId,
+    rtR0SegVBoxSolGetPolicy,
+    rtR0SegVBoxSolCapable
+};
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_solaris_memobj_r0drv_solaris_h */
+
diff --git a/src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c
new file mode 100644
index 00000000..0531288e
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c
@@ -0,0 +1,100 @@
+/* $Id: memuserkernel-r0drv-solaris.c $ */
+/** @file
+ * IPRT - User & Kernel Memory, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2009-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mem.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+
+
+/**
+ * Copies @a cb bytes from user memory @a R3PtrSrc to kernel buffer @a pvDst.
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED if ddi_copyin() fails.
+ */
+RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
+{
+    int rc;
+    RT_ASSERT_INTS_ON();   /* interrupts must be enabled for user copies */
+
+    rc = ddi_copyin((const char *)R3PtrSrc, pvDst, cb, 0 /*flags*/);
+    if (RT_LIKELY(rc == 0))
+        return VINF_SUCCESS;
+    return VERR_ACCESS_DENIED;
+}
+
+
+/**
+ * Copies @a cb bytes from kernel buffer @a pvSrc to user memory @a R3PtrDst.
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED if ddi_copyout() fails.
+ */
+RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
+{
+    int rc;
+    RT_ASSERT_INTS_ON();
+
+    rc = ddi_copyout(pvSrc, (void *)R3PtrDst, cb, 0 /*flags*/);
+    if (RT_LIKELY(rc == 0))
+        return VINF_SUCCESS;
+    return VERR_ACCESS_DENIED;
+}
+
+
+/** User addresses are everything below the kernel base on Solaris. */
+RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
+{
+    return R3Ptr < kernelbase;
+}
+
+
+/** Kernel addresses are everything at or above the kernel base. */
+RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
+{
+    return (uintptr_t)pv >= kernelbase;
+}
+
+
+/** Kernel and user address ranges never overlap on Solaris. */
+RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
+{
+    return true;
+}
+
+
+/**
+ * Fault-safe kernel-to-kernel copy (read direction) using kcopy().
+ * @returns VINF_SUCCESS, or VERR_ACCESS_DENIED on a faulting address.
+ */
+RTR0DECL(int) RTR0MemKernelCopyFrom(void *pvDst, void const *pvSrc, size_t cb)
+{
+    int rc = kcopy(pvSrc, pvDst, cb);
+    if (RT_LIKELY(rc == 0))
+        return VINF_SUCCESS;
+    return VERR_ACCESS_DENIED;
+}
+
+
+/**
+ * Fault-safe kernel-to-kernel copy (write direction); same kcopy() semantics
+ * as RTR0MemKernelCopyFrom.
+ */
+RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
+{
+    int rc = kcopy(pvSrc, pvDst, cb);
+    if (RT_LIKELY(rc == 0))
+        return VINF_SUCCESS;
+    return VERR_ACCESS_DENIED;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c
new file mode 100644
index 00000000..f0275002
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/modulestub-r0drv-solaris.c
@@ -0,0 +1,79 @@
+/* $Id: modulestub-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Ring-0 Solaris stubs
+ */
+
+/*
+ * Copyright (C) 2011-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <sys/modctl.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Misc-module descriptor for this stub kernel module. */
+static struct modlmisc g_rtModuleStubMisc =
+{
+    &mod_miscops, /* extern from kernel */
+    "platform agnostic module"
+};
+
+
+/** Linkage connecting the misc descriptor to the module framework. */
+static struct modlinkage g_rtModuleStubModLinkage =
+{
+    MODREV_1, /* loadable module system revision */
+    {
+        &g_rtModuleStubMisc,
+        NULL /* terminate array of linkage structures */
+    }
+};
+
+
+
+/** Module entry point: disables auto-unload and installs the stub module. */
+int _init(void);
+int _init(void)
+{
+    /* Disable auto unloading. */
+    modctl_t *pModCtl = mod_getctl(&g_rtModuleStubModLinkage);
+    if (pModCtl)
+        pModCtl->mod_loadflags |= MOD_NOAUTOUNLOAD;
+
+    return mod_install(&g_rtModuleStubModLinkage);
+}
+
+
+/** Module exit point: removes the stub module. */
+int _fini(void);
+int _fini(void)
+{
+    return mod_remove(&g_rtModuleStubModLinkage);
+}
+
+
+/** Module info query: forwards to mod_info(). */
+int _info(struct modinfo *pModInfo);
+int _info(struct modinfo *pModInfo)
+{
+    return mod_info(&g_rtModuleStubModLinkage, pModInfo);
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c
new file mode 100644
index 00000000..36cd667a
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c
@@ -0,0 +1,450 @@
+/* $Id: mp-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Multiprocessor, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/thread.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include "r0drv/mp-r0drv.h"
+
+typedef int FNRTMPSOLWORKER(void *pvUser1, void *pvUser2, void *pvUser3);
+typedef FNRTMPSOLWORKER *PFNRTMPSOLWORKER;
+
+
+/** No queryable cross-CPU work queue on Solaris; always reports none. */
+RTDECL(bool) RTMpIsCpuWorkPending(void)
+{
+ return false;
+}
+
+
+/** Gets the ID (cpu_id) of the CPU the caller is currently executing on. */
+RTDECL(RTCPUID) RTMpCpuId(void)
+{
+ return CPU->cpu_id;
+}
+
+
+/** Current CPU set index; identical to the CPU ID on Solaris. */
+RTDECL(int) RTMpCurSetIndex(void)
+{
+ return CPU->cpu_id;
+}
+
+
+/** Current CPU set index and ID in one call (the same value on Solaris). */
+RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
+{
+ return *pidCpu = CPU->cpu_id;
+}
+
+
+/** Maps a CPU ID to a set index; -1 when out of range (ID == index here). */
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+ return idCpu < RTCPUSET_MAX_CPUS && idCpu <= max_cpuid ? idCpu : -1;
+}
+
+
+/** Maps a set index back to a CPU ID; NIL_RTCPUID when out of range. */
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+ return (unsigned)iCpu <= max_cpuid ? iCpu : NIL_RTCPUID;
+}
+
+
+/** Highest possible CPU ID (inclusive), from the kernel's max_cpuid. */
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
+{
+ return max_cpuid;
+}
+
+
+/** Checks if a CPU is online using the cached online set (g_rtMpSolCpuSet)
+ * maintained by the MP notification callback. */
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+ /*
+ * We cannot query CPU status recursively, check cpu member from cached set.
+ */
+ if (idCpu >= ncpus)
+ return false;
+
+ return RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu);
+}
+
+
+/** A CPU is possible when its ID is below the kernel's ncpus count. */
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
+{
+ return idCpu < ncpus;
+}
+
+
+/** Builds the set of all possible CPUs by probing every ID down from the max. */
+RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
+{
+ RTCPUID idCpu;
+
+ RTCpuSetEmpty(pSet);
+ idCpu = RTMpGetMaxCpuId(); /* it's inclusive */
+ do
+ {
+ if (RTMpIsCpuPossible(idCpu))
+ RTCpuSetAdd(pSet, idCpu);
+ } while (idCpu-- > 0);
+
+ return pSet;
+}
+
+
+/** Number of (possible) CPUs in the system. */
+RTDECL(RTCPUID) RTMpGetCount(void)
+{
+ return ncpus;
+}
+
+
+/** Copies the cached online CPU set maintained by the MP notification code. */
+RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
+{
+ /*
+ * We cannot query CPU status recursively, return the cached set.
+ */
+ *pSet = g_rtMpSolCpuSet;
+ return pSet;
+}
+
+
+/** Counts the members of the cached online CPU set. */
+RTDECL(RTCPUID) RTMpGetOnlineCount(void)
+{
+ RTCPUSET Set;
+ RTMpGetOnlineSet(&Set);
+ return RTCpuSetCount(&Set);
+}
+
+
+/**
+ * Wrapper to the Solaris IPI (cross call) infrastructure.
+ *
+ * Dispatches to the old or new xc_call interface depending on what the host
+ * kernel exposes (g_frtSolOldIPI / g_frtSolOldIPIUlong resolved at init).
+ *
+ * @param   pCpuSet       Pointer to the Solaris CPU set to target.
+ * @param   pfnSolWorker  Function to execute on the target CPU(s).
+ * @param   pArgs         Pointer to the RTMPARGS to pass to @a pfnSolWorker.
+ */
+static void rtMpSolCrossCall(PRTSOLCPUSET pCpuSet, PFNRTMPSOLWORKER pfnSolWorker, PRTMPARGS pArgs)
+{
+    AssertPtrReturnVoid(pCpuSet);
+    AssertPtrReturnVoid(pfnSolWorker);
+    AssertPtrReturnVoid(pArgs); /* Fix: originally asserted pCpuSet a second time. */
+
+    if (g_frtSolOldIPI)
+    {
+        if (g_frtSolOldIPIUlong)
+        {
+            g_rtSolXcCall.u.pfnSol_xc_call_old_ulong((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                                     0,                        /* Arg2, ignored */
+                                                     0,                        /* Arg3, ignored */
+                                                     IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
+                                                     pCpuSet->auCpus[0],       /* Target CPU(s) */
+                                                     (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+        }
+        else
+        {
+            g_rtSolXcCall.u.pfnSol_xc_call_old((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                               0,                        /* Arg2, ignored */
+                                               0,                        /* Arg3, ignored */
+                                               IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
+                                               *pCpuSet,                 /* Target CPU set */
+                                               (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+        }
+    }
+    else
+    {
+        g_rtSolXcCall.u.pfnSol_xc_call((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                       0,                        /* Arg2 */
+                                       0,                        /* Arg3 */
+                                       &pCpuSet->auCpus[0],      /* Target CPU set */
+                                       (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+    }
+}
+
+
+/**
+ * Native Solaris cross call worker bridging to PFNRTMPWORKER for RTMpOnAll.
+ *
+ * @returns 0 (required by the Solaris cross call signature).
+ * @param   uArg        Pointer to the RTMPARGS package.
+ * @param   pvIgnored1  Ignored.
+ * @param   pvIgnored2  Ignored.
+ */
+static int rtMpSolOnAllCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+    PRTMPARGS pArgs = (PRTMPARGS)uArg;
+
+    NOREF(pvIgnored1);
+    NOREF(pvIgnored2);
+
+    /*
+     * Solaris CPU cross calls execute on offline CPUs too.  Consult our
+     * cached online set and only run the worker when this CPU is online.
+     */
+    if (RTMpIsCpuOnline(RTMpCpuId()))
+        pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
+
+    return 0;
+}
+
+
+/**
+ * Executes @a pfnWorker on every online CPU (the cross call targets all CPUs;
+ * offline ones are filtered out in rtMpSolOnAllCpuWrapper). Preemption is
+ * disabled around the cross call.
+ */
+RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RTMPARGS Args;
+ RTSOLCPUSET CpuSet;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RT_ASSERT_INTS_ON();
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = NIL_RTCPUID;
+ Args.cHits = 0;
+
+ /* Target every CPU: set all bits in every word of the set. */
+ for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+ CpuSet.auCpus[i] = (ulong_t)-1L;
+
+ RTThreadPreemptDisable(&PreemptState);
+
+ rtMpSolCrossCall(&CpuSet, rtMpSolOnAllCpuWrapper, &Args);
+
+ RTThreadPreemptRestore(&PreemptState);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
+ * for the RTMpOnOthers API.
+ *
+ * @returns 0 (required by the Solaris cross call signature).
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ *
+ * @note NOTE(review): unlike the RTMpOnAll wrapper, this does not skip
+ * offline CPUs; presumably fine since offline CPUs never match the
+ * asserted condition in practice - confirm.
+ */
+static int rtMpSolOnOtherCpusWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+ RTCPUID idCpu = RTMpCpuId();
+
+ Assert(idCpu != pArgs->idCpu);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+/**
+ * Executes @a pfnWorker on every online CPU except the calling one.
+ *
+ * Builds an all-ones CPU set, clears the current CPU's bit and cross calls
+ * the rest with preemption disabled.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   pfnWorker   The worker to run on the other CPUs.
+ * @param   pvUser1     First user argument.
+ * @param   pvUser2     Second user argument.
+ */
+RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    RTMPARGS Args;
+    RTSOLCPUSET CpuSet;
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RT_ASSERT_INTS_ON();
+
+    Args.pfnWorker = pfnWorker;
+    Args.pvUser1 = pvUser1;
+    Args.pvUser2 = pvUser2;
+    Args.idCpu   = RTMpCpuId();
+    Args.cHits   = 0;
+
+    /* The caller is supposed to have disabled preemption, but take no chances. */
+    RTThreadPreemptDisable(&PreemptState);
+
+    /* Fix: fill every word of the set (the original wrote auCpus[0] in each
+       iteration, leaving words 1..n uninitialized), then clear our own bit. */
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = (ulong_t)-1L;
+    BT_CLEAR(CpuSet.auCpus, RTMpCpuId());
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
+
+    return VINF_SUCCESS;
+}
+
+
+
+/**
+ * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
+ * for the RTMpOnPair API.
+ *
+ * @returns 0 (required by the Solaris cross call signature).
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ */
+static int rtMpSolOnPairCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+ RTCPUID idCpu = RTMpCpuId();
+
+ Assert(idCpu == pArgs->idCpu || idCpu == pArgs->idCpu2);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits); /* Counted so RTMpOnPair can verify both CPUs ran. */
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+/**
+ * Executes @a pfnWorker on the two given CPUs via a cross call, verifying
+ * through the hit counter that both actually ran the worker.
+ *
+ * @returns VINF_SUCCESS, VERR_NOT_ALL_CPUS_SHOWED, VERR_CPU_OFFLINE,
+ *          VERR_CPU_NOT_FOUND or VERR_CPU_IPE_1.
+ * @param   idCpu1      The first target CPU.
+ * @param   idCpu2      The second target CPU (must differ from @a idCpu1).
+ * @param   fFlags      RTMPON_F_XXX flags.
+ * @param   pfnWorker   The worker to run on both CPUs.
+ * @param   pvUser1     First user argument.
+ * @param   pvUser2     Second user argument.
+ */
+RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+    int rc;
+    RTMPARGS Args;
+    RTSOLCPUSET CpuSet;
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+
+    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
+    /* Fix: reject only bits *outside* the valid mask; the original check
+       (!(fFlags & RTMPON_F_VALID_MASK)) rejected every valid flag and let
+       invalid bits through. */
+    AssertReturn(!(fFlags & ~RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
+
+    Args.pfnWorker = pfnWorker;
+    Args.pvUser1 = pvUser1;
+    Args.pvUser2 = pvUser2;
+    Args.idCpu   = idCpu1;
+    Args.idCpu2  = idCpu2;
+    Args.cHits   = 0;
+
+    /* Build a CPU set containing only the two target CPUs. */
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = 0;
+    BT_SET(CpuSet.auCpus, idCpu1);
+    BT_SET(CpuSet.auCpus, idCpu2);
+
+    /*
+     * Check that both CPUs are online before doing the broadcast call.
+     */
+    RTThreadPreemptDisable(&PreemptState);
+    if (   RTMpIsCpuOnline(idCpu1)
+        && RTMpIsCpuOnline(idCpu2))
+    {
+        rtMpSolCrossCall(&CpuSet, rtMpSolOnPairCpuWrapper, &Args);
+
+        Assert(Args.cHits <= 2);
+        if (Args.cHits == 2)
+            rc = VINF_SUCCESS;
+        else if (Args.cHits == 1)
+            rc = VERR_NOT_ALL_CPUS_SHOWED;
+        else if (Args.cHits == 0)
+            rc = VERR_CPU_OFFLINE;
+        else
+            rc = VERR_CPU_IPE_1;
+    }
+    /*
+     * A CPU must be present to be considered just offline.
+     */
+    else if (   RTMpIsCpuPresent(idCpu1)
+             && RTMpIsCpuPresent(idCpu2))
+        rc = VERR_CPU_OFFLINE;
+    else
+        rc = VERR_CPU_NOT_FOUND;
+
+    RTThreadPreemptRestore(&PreemptState);
+    return rc;
+}
+
+
+/** Concurrent execution of the pair workers is supported (simultaneous
+ * cross call to both CPUs). */
+RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
+{
+ return true;
+}
+
+
+/**
+ * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
+ * for the RTMpOnSpecific API.
+ *
+ * @returns 0 (required by the Solaris cross call signature).
+ * @param uArg Pointer to the RTMPARGS package.
+ * @param pvIgnored1 Ignored.
+ * @param pvIgnored2 Ignored.
+ */
+static int rtMpSolOnSpecificCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
+{
+ PRTMPARGS pArgs = (PRTMPARGS)(uArg);
+ RTCPUID idCpu = RTMpCpuId();
+
+ Assert(idCpu == pArgs->idCpu);
+ pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
+ ASMAtomicIncU32(&pArgs->cHits); /* Lets RTMpOnSpecific verify the CPU was hit. */
+
+ NOREF(pvIgnored1);
+ NOREF(pvIgnored2);
+ return 0;
+}
+
+
+/**
+ * Executes @a pfnWorker on CPU @a idCpu only. Returns VERR_CPU_NOT_FOUND /
+ * VERR_CPU_OFFLINE when the CPU is absent or offline; otherwise cross calls
+ * the single CPU with preemption disabled and checks the hit counter.
+ */
+RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
+{
+ RTMPARGS Args;
+ RTSOLCPUSET CpuSet;
+ RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ RT_ASSERT_INTS_ON();
+
+ if (idCpu >= ncpus)
+ return VERR_CPU_NOT_FOUND;
+
+ if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
+ return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;
+
+ Args.pfnWorker = pfnWorker;
+ Args.pvUser1 = pvUser1;
+ Args.pvUser2 = pvUser2;
+ Args.idCpu = idCpu;
+ Args.cHits = 0;
+
+ /* Single-bit CPU set containing only the target CPU. */
+ for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+ CpuSet.auCpus[i] = 0;
+ BT_SET(CpuSet.auCpus, idCpu);
+
+ RTThreadPreemptDisable(&PreemptState);
+
+ rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
+
+ RTThreadPreemptRestore(&PreemptState);
+
+ Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
+
+ return ASMAtomicUoReadU32(&Args.cHits) == 1
+ ? VINF_SUCCESS
+ : VERR_CPU_NOT_FOUND;
+}
+
+
+/** RTMpOnAll workers may safely execute concurrently on all CPUs. */
+RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
+{
+ return true;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c
new file mode 100644
index 00000000..c6a0f2ab
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c
@@ -0,0 +1,139 @@
+/* $Id: mpnotification-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Multiprocessor Event Notifications, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2008-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+#include <iprt/cpuset.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include "r0drv/mp-r0drv.h"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Whether CPUs are being watched or not (set by NativeInit, cleared by
+ * NativeTerm). */
+static volatile bool g_fSolCpuWatch = false;
+/** Set of online cpus that is maintained by the MP callback.
+ * This avoids locking issues querying the set from the kernel as well as
+ * eliminating any uncertainty regarding the online status during the
+ * callback. Read by RTMpIsCpuOnline/RTMpGetOnlineSet in mp-r0drv-solaris.c. */
+RTCPUSET g_rtMpSolCpuSet;
+
+/**
+ * Internal solaris representation for watching CPUs.
+ * NOTE(review): not referenced anywhere in this file - possibly vestigial;
+ * confirm before removing.
+ */
+typedef struct RTMPSOLWATCHCPUS
+{
+ /** Function pointer to Mp worker. */
+ PFNRTMPWORKER pfnWorker;
+ /** Argument to pass to the Mp worker. */
+ void *pvArg;
+} RTMPSOLWATCHCPUS;
+typedef RTMPSOLWATCHCPUS *PRTMPSOLWATCHCPUS;
+
+
+/**
+ * Solaris callback function for Mp event notification.
+ *
+ * @returns 0 (the Solaris interface requires an int return; errors are not
+ * reported).
+ * @param CpuState The current event/state of the CPU.
+ * @param iCpu Which CPU is this event for.
+ * @param pvArg Ignored.
+ *
+ * @remarks This function assumes index == RTCPUID.
+ * We may -not- be firing on the CPU going online/offline and called
+ * with preemption enabled.
+ */
+static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
+{
+ RTMPEVENT enmMpEvent;
+
+ /*
+ * Update our CPU set structures first regardless of whether we've been
+ * scheduled on the right CPU or not, this is just atomic accounting.
+ */
+ if (CpuState == CPU_ON)
+ {
+ enmMpEvent = RTMPEVENT_ONLINE;
+ RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
+ }
+ else if (CpuState == CPU_OFF)
+ {
+ enmMpEvent = RTMPEVENT_OFFLINE;
+ RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
+ }
+ else
+ return 0; /* Other transitions are of no interest to us. */
+
+ rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
+ NOREF(pvArg);
+ return 0;
+}
+
+
+/**
+ * Initializes native MP notifications: registers the CPU setup callback and
+ * seeds g_rtMpSolCpuSet with the currently online CPUs.
+ *
+ * @returns VINF_SUCCESS, or VERR_WRONG_ORDER if already initialized.
+ */
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+ if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
+ return VERR_WRONG_ORDER;
+
+ /*
+ * Register the callback building the online cpu set as we do so.
+ */
+ RTCpuSetEmpty(&g_rtMpSolCpuSet);
+
+ mutex_enter(&cpu_lock);
+ register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
+
+ /* Seed the online set while holding cpu_lock so no transition is missed. */
+ for (int i = 0; i < (int)RTMpGetCount(); ++i)
+ if (cpu_is_online(cpu[i]))
+ rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);
+
+ ASMAtomicWriteBool(&g_fSolCpuWatch, true);
+ mutex_exit(&cpu_lock);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Tears down native MP notifications, unregistering the CPU setup callback.
+ */
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+ if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
+ {
+ mutex_enter(&cpu_lock);
+ unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
+ ASMAtomicWriteBool(&g_fSolCpuWatch, false);
+ mutex_exit(&cpu_lock);
+ }
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c
new file mode 100644
index 00000000..a68e6dfe
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/process-r0drv-solaris.c
@@ -0,0 +1,49 @@
+/* $Id: process-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Process Management, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/process.h>
+
+
+
+/** Returns the current process ID via ddi_get_pid(). */
+RTDECL(RTPROCESS) RTProcSelf(void)
+{
+ return ddi_get_pid();
+}
+
+
+/** Returns the current process handle (proc_t pointer cast to RTR0PROCESS).
+ * NOTE(review): the drv_getparm return code is ignored; on failure this
+ * returns the NULL-initialized pointer - confirm callers tolerate that. */
+RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
+{
+ proc_t *pProcess = NULL;
+ drv_getparm(UPROCP, &pProcess);
+ return (RTR0PROCESS)pProcess;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c
new file mode 100644
index 00000000..1fcda034
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semevent-r0drv-solaris.c
@@ -0,0 +1,347 @@
+/* $Id: semevent-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Single Release Event Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENT_WITHOUT_REMAPPING
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include <iprt/list.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+#include "semeventwait-r0drv-solaris.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Waiter entry. Lives on the stack.
+ *
+ * @remarks Unfortunately, we cannot easily use cv_signal because we cannot
+ * distinguish between it and the spurious wakeups we get after fork.
+ * So, we keep an unprioritized FIFO with the sleeping threads.
+ */
+typedef struct RTSEMEVENTSOLENTRY
+{
+ /** The list node (linked into RTSEMEVENTINTERNAL::WaitList). */
+ RTLISTNODE Node;
+ /** The thread. */
+ kthread_t *pThread;
+ /** Set to @c true when waking up the thread by signal or destroy. */
+ uint32_t volatile fWokenUp;
+} RTSEMEVENTSOLENTRY;
+/** Pointer to waiter entry. */
+typedef RTSEMEVENTSOLENTRY *PRTSEMEVENTSOLENTRY;
+
+
+/**
+ * Solaris event semaphore.
+ */
+typedef struct RTSEMEVENTINTERNAL
+{
+ /** Magic value (RTSEMEVENT_MAGIC). */
+ uint32_t volatile u32Magic;
+ /** The number of threads referencing this object. */
+ uint32_t volatile cRefs;
+ /** Set if the object is signalled when there are no waiters.
+ * Manipulated with ASMAtomic* accessors while holding Mtx. */
+ bool fSignaled;
+ /** List of waiting and woken up threads. */
+ RTLISTANCHOR WaitList;
+ /** The Solaris mutex protecting this structure and pairing up the with the cv. */
+ kmutex_t Mtx;
+ /** The Solaris condition variable. */
+ kcondvar_t Cnd;
+} RTSEMEVENTINTERNAL, *PRTSEMEVENTINTERNAL;
+
+
+
+/** Creates a default event semaphore (no lock validation class). */
+RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
+{
+ return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/** Creates an event semaphore. The kernel mutex is initialized with an
+ * interrupt priority derived from DISP_LEVEL so it can pair with the CV at
+ * elevated IPL. */
+RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
+{
+ AssertCompile(sizeof(RTSEMEVENTINTERNAL) > sizeof(void *));
+ AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
+ Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));
+ AssertPtrReturn(phEventSem, VERR_INVALID_POINTER);
+ RT_ASSERT_PREEMPTIBLE();
+
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ pThis->u32Magic = RTSEMEVENT_MAGIC;
+ pThis->cRefs = 1; /* The creation reference, released by RTSemEventDestroy. */
+ pThis->fSignaled = false;
+ RTListInit(&pThis->WaitList);
+ mutex_init(&pThis->Mtx, "IPRT Event Semaphore", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
+ cv_init(&pThis->Cnd, "IPRT CV", CV_DRIVER, NULL);
+
+ *phEventSem = pThis;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Retain a reference to the semaphore.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventSolRetain(PRTSEMEVENTINTERNAL pThis)
+{
+ uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
+ Assert(cRefs && cRefs < 100000); /* Sanity bound to catch leaks/underflow. */
+ NOREF(cRefs);
+}
+
+
+/**
+ * The destructor: tears down the CV and mutex and frees the memory.
+ *
+ * @param pThis The semaphore; must already be invalidated.
+ */
+static void rtR0SemEventSolDtor(PRTSEMEVENTINTERNAL pThis)
+{
+ Assert(pThis->u32Magic != RTSEMEVENT_MAGIC);
+ cv_destroy(&pThis->Cnd);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+}
+
+
+/**
+ * Release a reference, destroy the thing if necessary.
+ *
+ * @param pThis The semaphore.
+ */
+DECLINLINE(void) rtR0SemEventSolRelease(PRTSEMEVENTINTERNAL pThis)
+{
+ if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
+ rtR0SemEventSolDtor(pThis);
+}
+
+
+/**
+ * Destroys the event semaphore: invalidates the magic, marks every waiter as
+ * woken and broadcasts the CV so they exit with VERR_SEM_DESTROYED, then
+ * drops the creation reference. The memory is freed by whichever caller
+ * releases the final reference (see rtR0SemEventSolRelease).
+ */
+RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
+{
+ /*
+ * Validate input.
+ */
+ PRTSEMEVENTINTERNAL pThis = hEventSem;
+ if (pThis == NIL_RTSEMEVENT)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ Assert(pThis->cRefs > 0);
+ RT_ASSERT_INTS_ON();
+
+ mutex_enter(&pThis->Mtx);
+
+ /*
+ * Invalidate the semaphore.
+ */
+ ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC);
+ ASMAtomicWriteBool(&pThis->fSignaled, false);
+
+ /*
+ * Abort and wake up all threads.
+ */
+ PRTSEMEVENTSOLENTRY pWaiter;
+ RTListForEach(&pThis->WaitList, pWaiter, RTSEMEVENTSOLENTRY, Node)
+ {
+ pWaiter->fWokenUp = true;
+ }
+ cv_broadcast(&pThis->Cnd);
+
+ /*
+ * Release the reference from RTSemEventCreateEx.
+ */
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventSolRelease(pThis);
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Signals the event semaphore. If a not-yet-woken waiter is found in the
+ * FIFO it is marked woken and made runnable (setrun) and the signaled state
+ * is consumed; otherwise the semaphore stays signaled for the next waiter.
+ */
+RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
+{
+ PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+ RT_ASSERT_INTS_ON();
+
+ rtR0SemEventSolRetain(pThis);
+ rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
+
+ /*
+ * Wake up one thread.
+ */
+ ASMAtomicWriteBool(&pThis->fSignaled, true);
+
+ PRTSEMEVENTSOLENTRY pWaiter;
+ RTListForEach(&pThis->WaitList, pWaiter, RTSEMEVENTSOLENTRY, Node)
+ {
+ if (!pWaiter->fWokenUp)
+ {
+ pWaiter->fWokenUp = true;
+ setrun(pWaiter->pThread);
+ ASMAtomicWriteBool(&pThis->fSignaled, false); /* The wakeup consumes the signal. */
+ break;
+ }
+ }
+
+ mutex_exit(&pThis->Mtx);
+ rtR0SemEventSolRelease(pThis);
+
+#ifdef DEBUG_ramshankar
+ /** See @bugref{6318} comment#11 */
+ return VINF_SUCCESS;
+#endif
+ RT_ASSERT_PREEMPT_CPUID();
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
+ *
+ * Consumes the signaled state if set; otherwise queues a stack-based waiter
+ * entry and sleeps on the CV until woken, timed out, interrupted or the
+ * semaphore is destroyed.
+ *
+ * @returns VBox status code.
+ * @param   pThis       The event semaphore.
+ * @param   fFlags      See RTSemEventWaitEx.
+ * @param   uTimeout    See RTSemEventWaitEx.
+ * @param   pSrcPos     The source code position of the wait (unused here).
+ */
+static int rtR0SemEventSolWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+                               PCRTLOCKVALSRCPOS pSrcPos)
+{
+    /*
+     * Validate the input.
+     */
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+    NOREF(pSrcPos);
+
+    rtR0SemEventSolRetain(pThis);
+    mutex_enter(&pThis->Mtx);
+
+    /*
+     * In the signaled state?
+     */
+    int rc;
+    if (ASMAtomicCmpXchgBool(&pThis->fSignaled, false, true))
+        rc = VINF_SUCCESS;
+    else
+    {
+        /*
+         * We have to wait.
+         */
+        RTR0SEMSOLWAIT Wait;
+        rc = rtR0SemSolWaitInit(&Wait, fFlags, uTimeout);
+        if (RT_SUCCESS(rc))
+        {
+            RTSEMEVENTSOLENTRY Waiter; /* ASSUMES we won't get swapped out while waiting (TS_DONT_SWAP). */
+            Waiter.pThread = curthread;
+            Waiter.fWokenUp = false;
+            RTListAppend(&pThis->WaitList, &Waiter.Node);
+
+            for (;;)
+            {
+                /* Check the exit conditions. (The original performed the
+                   destruction test twice in a row; one check suffices.) */
+                if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
+                    rc = VERR_SEM_DESTROYED;
+                else if (Waiter.fWokenUp)
+                    rc = VINF_SUCCESS;
+                else if (rtR0SemSolWaitHasTimedOut(&Wait))
+                    rc = VERR_TIMEOUT;
+                else if (rtR0SemSolWaitWasInterrupted(&Wait))
+                    rc = VERR_INTERRUPTED;
+                else
+                {
+                    /* Do the wait and then recheck the conditions. */
+                    rtR0SemSolWaitDoIt(&Wait, &pThis->Cnd, &pThis->Mtx, &Waiter.fWokenUp, false);
+                    continue;
+                }
+                break;
+            }
+
+            rtR0SemSolWaitDelete(&Wait);
+            RTListNodeRemove(&Waiter.Node);
+        }
+    }
+
+    mutex_exit(&pThis->Mtx);
+    rtR0SemEventSolRelease(pThis);
+    return rc;
+}
+
+
+/** Waits on the event semaphore; see RTSemEventWaitEx in iprt/semaphore.h
+ * for flag and timeout semantics. */
+RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
+{
+#ifndef RTSEMEVENT_STRICT
+ return rtR0SemEventSolWait(hEventSem, fFlags, uTimeout, NULL);
+#else
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+ return rtR0SemEventSolWait(hEventSem, fFlags, uTimeout, &SrcPos);
+#endif
+}
+
+
+/** Debug variant of RTSemEventWaitEx carrying an explicit source position. */
+RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
+ RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+ return rtR0SemEventSolWait(hEventSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+/** Returns the wait resolution (delegates to the shared Solaris wait code
+ * in semeventwait-r0drv-solaris.h). */
+RTDECL(uint32_t) RTSemEventGetResolution(void)
+{
+ return rtR0SemSolWaitGetResolution();
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c
new file mode 100644
index 00000000..6f217bfa
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semeventmulti-r0drv-solaris.c
@@ -0,0 +1,355 @@
+/* $Id: semeventmulti-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Multiple Release Event Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMEVENTMULTI_WITHOUT_REMAPPING
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/err.h>
+#include <iprt/lockvalidator.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+#include "internal/magics.h"
+#include "semeventwait-r0drv-solaris.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** @name fStateAndGen values
+ * @{ */
+/** The state bit number. */
+#define RTSEMEVENTMULTISOL_STATE_BIT 0
+/** The state mask. */
+#define RTSEMEVENTMULTISOL_STATE_MASK RT_BIT_32(RTSEMEVENTMULTISOL_STATE_BIT)
+/** The generation mask. */
+#define RTSEMEVENTMULTISOL_GEN_MASK ~RTSEMEVENTMULTISOL_STATE_MASK
+/** The generation shift. */
+#define RTSEMEVENTMULTISOL_GEN_SHIFT 1
+/** The initial variable value. */
+#define RTSEMEVENTMULTISOL_STATE_GEN_INIT UINT32_C(0xfffffffc)
+/** @} */
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Solaris multiple release event semaphore.
+ *
+ * The instance is reference counted (cRefs); it is freed by the last
+ * release after the magic has been marked dead by RTSemEventMultiDestroy.
+ */
+typedef struct RTSEMEVENTMULTIINTERNAL
+{
+    /** Magic value (RTSEMEVENTMULTI_MAGIC). */
+    uint32_t volatile   u32Magic;
+    /** The number of references. */
+    uint32_t volatile   cRefs;
+    /** The object state bit and generation counter.
+     * The generation counter is incremented every time the object is
+     * signalled. */
+    uint32_t volatile   fStateAndGen;
+    /** The Solaris mutex protecting this structure and pairing up the with the cv. */
+    kmutex_t            Mtx;
+    /** The Solaris condition variable. */
+    kcondvar_t          Cnd;
+} RTSEMEVENTMULTIINTERNAL, *PRTSEMEVENTMULTIINTERNAL;
+
+
+
+/**
+ * Creates a multiple release event semaphore with default flags and no
+ * lock validator class; thin wrapper over RTSemEventMultiCreateEx.
+ */
+RTDECL(int) RTSemEventMultiCreate(PRTSEMEVENTMULTI phEventMultiSem)
+{
+    return RTSemEventMultiCreateEx(phEventMultiSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
+}
+
+
+/**
+ * Creates a multiple release event semaphore (extended version).
+ *
+ * hClass and pszNameFmt are not used by this ring-0 Solaris implementation
+ * (no lock validator here).
+ */
+RTDECL(int) RTSemEventMultiCreateEx(PRTSEMEVENTMULTI phEventMultiSem, uint32_t fFlags, RTLOCKVALCLASS hClass,
+                                    const char *pszNameFmt, ...)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis;
+
+    AssertReturn(!(fFlags & ~RTSEMEVENTMULTI_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
+    AssertPtrReturn(phEventMultiSem, VERR_INVALID_POINTER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    /* Guard against the structure shrinking to pointer size (handle aliasing). */
+    AssertCompile(sizeof(RTSEMEVENTMULTIINTERNAL) > sizeof(void *));
+
+    pThis = (PRTSEMEVENTMULTIINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic     = RTSEMEVENTMULTI_MAGIC;
+    pThis->cRefs        = 1;
+    pThis->fStateAndGen = RTSEMEVENTMULTISOL_STATE_GEN_INIT;
+    /* The mutex is usable up to DISP_LEVEL; the cv pairs with it. */
+    mutex_init(&pThis->Mtx, "IPRT Multiple Release Event Semaphore", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
+    cv_init(&pThis->Cnd, "IPRT CV", CV_DRIVER, NULL);
+
+    *phEventMultiSem = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Grabs another reference to the semaphore.
+ *
+ * @param   pThis               The semaphore instance.
+ */
+DECLINLINE(void) rtR0SemEventMultiSolRetain(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
+    NOREF(cNewRefs);                        /* only referenced by the assertion */
+    Assert(cNewRefs > 0 && cNewRefs < 100000);
+}
+
+
+/**
+ * Destructor that is called when cRefs == 0.
+ *
+ * @param   pThis               The instance to destroy.
+ */
+static void rtSemEventMultiDtor(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    /* The magic must already have been marked dead (by RTSemEventMultiDestroy). */
+    Assert(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC);
+    cv_destroy(&pThis->Cnd);
+    mutex_destroy(&pThis->Mtx);
+    RTMemFree(pThis);
+}
+
+
+/**
+ * Drops a reference, running the destructor when the count reaches zero.
+ *
+ * @param   pThis               The semaphore instance.
+ */
+DECLINLINE(void) rtR0SemEventMultiSolRelease(PRTSEMEVENTMULTIINTERNAL pThis)
+{
+    uint32_t const cRemaining = ASMAtomicDecU32(&pThis->cRefs);
+    if (RT_UNLIKELY(cRemaining == 0))
+        rtSemEventMultiDtor(pThis);
+}
+
+
+
+/**
+ * Destroys the event semaphore.
+ *
+ * Marks the magic dead and wakes all waiters so they can observe
+ * VERR_SEM_DESTROYED; the memory itself is freed by the last reference
+ * holder via rtR0SemEventMultiSolRelease.  NIL handles are tolerated.
+ */
+RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    if (pThis == NIL_RTSEMEVENTMULTI)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->cRefs > 0, ("pThis=%p cRefs=%d\n", pThis, pThis->cRefs), VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+
+    mutex_enter(&pThis->Mtx);
+
+    /* Invalidate the handle and wake up all threads that might be waiting on the semaphore. */
+    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
+    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMEVENTMULTI_MAGIC_DEAD);
+    /* Clear the signalled bit but keep the generation counter (GEN_MASK). */
+    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTISOL_GEN_MASK);
+    cv_broadcast(&pThis->Cnd);
+
+    /* Drop the reference from RTSemEventMultiCreateEx. */
+    mutex_exit(&pThis->Mtx);
+    rtR0SemEventMultiSolRelease(pThis);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Signals the event semaphore, releasing all waiting threads.
+ *
+ * Advances the generation counter and sets the signalled state bit in one
+ * atomic write, then broadcasts the condition variable under the mutex.
+ */
+RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    RT_ASSERT_PREEMPT_CPUID_VAR();
+
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
+                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
+                    VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+    rtR0SemEventMultiSolRetain(pThis);
+    /* Unpinning hack avoids a deadlock when called on an interrupt thread. */
+    rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
+    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
+
+    /*
+     * Do the job: bump the generation and set the signalled bit.
+     */
+    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+    fNew += 1 << RTSEMEVENTMULTISOL_GEN_SHIFT;
+    fNew |= RTSEMEVENTMULTISOL_STATE_MASK;
+    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);
+
+    cv_broadcast(&pThis->Cnd);
+
+    mutex_exit(&pThis->Mtx);
+
+    rtR0SemEventMultiSolRelease(pThis);
+#ifdef DEBUG_ramshankar
+    /* Debug hack: skip the preemption CPUID assertion.  See @bugref{6318#c11}. */
+    return VINF_SUCCESS;
+#endif
+    RT_ASSERT_PREEMPT_CPUID();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Resets the event semaphore to the unsignalled state.
+ *
+ * Only clears the state bit; the generation counter is left untouched so
+ * concurrent waiters can still detect an intervening signal.
+ */
+RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
+{
+    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
+    RT_ASSERT_PREEMPT_CPUID_VAR();
+
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
+                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
+                    VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+
+    rtR0SemEventMultiSolRetain(pThis);
+    rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
+    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
+
+    /*
+     * Do the job (could be done without the lock, but play safe).
+     */
+    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTISOL_STATE_MASK);
+
+    mutex_exit(&pThis->Mtx);
+    rtR0SemEventMultiSolRelease(pThis);
+
+#ifdef DEBUG_ramshankar
+    /* Debug hack: skip the preemption CPUID assertion.  See @bugref{6318#c11}. */
+    return VINF_SUCCESS;
+#endif
+    RT_ASSERT_PREEMPT_CPUID();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
+ *
+ * Waits until the state/generation word changes (signal), the timeout
+ * expires, the wait is interrupted, or the semaphore is destroyed.
+ *
+ * @returns VBox status code.
+ * @param   pThis           The event semaphore.
+ * @param   fFlags          See RTSemEventMultiWaitEx.
+ * @param   uTimeout        See RTSemEventMultiWaitEx.
+ * @param   pSrcPos         The source code position of the wait.  Not used by
+ *                          this implementation (no ring-0 lock validator).
+ */
+static int rtR0SemEventMultiSolWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
+                                    PCRTLOCKVALSRCPOS pSrcPos)
+{
+    uint32_t    fOrgStateAndGen;
+    int         rc;
+    NOREF(pSrcPos);
+
+    /*
+     * Validate the input.
+     */
+    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
+    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
+    rtR0SemEventMultiSolRetain(pThis);
+    mutex_enter(&pThis->Mtx); /* this could be moved down to the else, but play safe for now. */
+
+    /*
+     * Is the event already signalled or do we have to wait?
+     */
+    fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
+    if (fOrgStateAndGen & RTSEMEVENTMULTISOL_STATE_MASK)
+        rc = VINF_SUCCESS;
+    else
+    {
+        /*
+         * We have to wait.
+         */
+        RTR0SEMSOLWAIT Wait;
+        rc = rtR0SemSolWaitInit(&Wait, fFlags, uTimeout);
+        if (RT_SUCCESS(rc))
+        {
+            for (;;)
+            {
+                /* Check the exit conditions.  (The destruction test was
+                   previously duplicated here; once is sufficient as no code
+                   runs between the two identical checks.) */
+                if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
+                    rc = VERR_SEM_DESTROYED;
+                else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
+                    rc = VINF_SUCCESS;      /* signalled or generation bumped */
+                else if (rtR0SemSolWaitHasTimedOut(&Wait))
+                    rc = VERR_TIMEOUT;
+                else if (rtR0SemSolWaitWasInterrupted(&Wait))
+                    rc = VERR_INTERRUPTED;
+                else
+                {
+                    /* Do the wait and then recheck the conditions. */
+                    rtR0SemSolWaitDoIt(&Wait, &pThis->Cnd, &pThis->Mtx, &pThis->fStateAndGen, fOrgStateAndGen);
+                    continue;
+                }
+                break;
+            }
+            rtR0SemSolWaitDelete(&Wait);
+        }
+    }
+
+    mutex_exit(&pThis->Mtx);
+    rtR0SemEventMultiSolRelease(pThis);
+    return rc;
+}
+
+
+
+RTDECL(int) RTSemEventMultiWaitEx(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout)
+{
+    /* NOTE(review): this tests RTSEMEVENT_STRICT rather than a multi-specific
+       strict macro - presumably a shared lock-validator switch; confirm. */
+#ifdef RTSEMEVENT_STRICT
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+    return rtR0SemEventMultiSolWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+#else
+    return rtR0SemEventMultiSolWait(hEventMultiSem, fFlags, uTimeout, NULL);
+#endif
+}
+
+
+/**
+ * Debug variant of RTSemEventMultiWaitEx.
+ *
+ * Builds a debug-API source position record (consuming uId and RT_SRC_POS
+ * via the RTLOCKVALSRCPOS_INIT_DEBUG_API macro) and forwards to the worker.
+ */
+RTDECL(int) RTSemEventMultiWaitExDebug(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
+                                       RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
+    return rtR0SemEventMultiSolWait(hEventMultiSem, fFlags, uTimeout, &SrcPos);
+}
+
+
+/**
+ * Returns the timeout resolution (in nanoseconds) of the underlying
+ * Solaris wait machinery.
+ */
+RTDECL(uint32_t) RTSemEventMultiGetResolution(void)
+{
+    return rtR0SemSolWaitGetResolution();
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h b/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h
new file mode 100644
index 00000000..31424ec9
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h
@@ -0,0 +1,496 @@
+/* $Id: semeventwait-r0drv-solaris.h $ */
+/** @file
+ * IPRT - Solaris Ring-0 Driver Helpers for Event Semaphore Waits.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_solaris_semeventwait_r0drv_solaris_h
+#define IPRT_INCLUDED_SRC_r0drv_solaris_semeventwait_r0drv_solaris_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include "the-solaris-kernel.h"
+
+#include <iprt/err.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+
+
+/** The resolution (nanoseconds) specified when using timeout_generic. */
+#define RTR0SEMSOLWAIT_RESOLUTION 50000
+
+/** Disables the cyclic fallback code for old S10 installs - see @bugref{5342}.
+ * @todo Fixed by @bugref{5595}, can be reenabled after checking out
+ * CY_HIGH_LEVEL. */
+#define RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+
+#define SOL_THREAD_TINTR_PTR ((kthread_t **)((char *)curthread + g_offrtSolThreadIntrThread))
+
+
+/**
+ * Solaris semaphore wait structure.
+ *
+ * Filled in by rtR0SemSolWaitInit and consumed by the rtR0SemSolWait*
+ * helpers below; lives on the waiting thread's stack.
+ */
+typedef struct RTR0SEMSOLWAIT
+{
+    /** The absolute timeout given as nanoseconds since the start of the
+     * monotonic clock. */
+    uint64_t        uNsAbsTimeout;
+    /** The timeout in nanoseconds relative to the start of the wait. */
+    uint64_t        cNsRelTimeout;
+    /** The native timeout value. */
+    union
+    {
+        /** The timeout (in ticks) when fHighRes is false. */
+        clock_t     lTimeout;
+    } u;
+    /** Set if we use high resolution timeouts. */
+    bool            fHighRes;
+    /** Set if it's an indefinite wait. */
+    bool            fIndefinite;
+    /** Set if the waiting thread is ready to be woken up.
+     * Avoids false setrun() calls due to temporary mutex exits. */
+    bool volatile   fWantWakeup;
+    /** Set if we've already timed out.
+     * Set by rtR0SemSolWaitDoIt or rtR0SemSolWaitHighResTimeout, read by
+     * rtR0SemSolWaitHasTimedOut. */
+    bool volatile   fTimedOut;
+    /** Whether the wait was interrupted. */
+    bool            fInterrupted;
+    /** Interruptible or uninterruptible wait. */
+    bool            fInterruptible;
+    /** The thread to wake up. */
+    kthread_t      *pThread;
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+    /** Cyclic timer ID (used by the timeout callback). */
+    cyclic_id_t     idCy;
+#endif
+    /** The mutex associated with the condition variable wait. */
+    void volatile  *pvMtx;
+} RTR0SEMSOLWAIT;
+/** Pointer to a Solaris semaphore wait structure. */
+typedef RTR0SEMSOLWAIT *PRTR0SEMSOLWAIT;
+
+
+/**
+ * Initializes a wait.
+ *
+ * The caller MUST check the wait condition BEFORE calling this function or the
+ * timeout logic will be flawed.
+ *
+ * @returns VINF_SUCCESS or VERR_TIMEOUT (zero/expired timeout).
+ * @param   pWait               The wait structure.
+ * @param   fFlags              The wait flags (RTSEMWAIT_FLAGS_XXX).
+ * @param   uTimeout            The timeout, interpreted per @a fFlags.
+ */
+DECLINLINE(int) rtR0SemSolWaitInit(PRTR0SEMSOLWAIT pWait, uint32_t fFlags, uint64_t uTimeout)
+{
+    /*
+     * Process the flags and timeout.
+     *
+     * Normalize uTimeout to nanoseconds and compute both relative and
+     * absolute deadlines; overflows degrade to an indefinite wait.
+     */
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
+            uTimeout = uTimeout < UINT64_MAX / RT_NS_1MS
+                     ? uTimeout * RT_NS_1MS
+                     : UINT64_MAX;
+        if (uTimeout == UINT64_MAX)
+            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+        else
+        {
+            uint64_t u64Now;
+            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
+            {
+                if (uTimeout == 0)
+                    return VERR_TIMEOUT;
+
+                u64Now = RTTimeSystemNanoTS();
+                pWait->cNsRelTimeout = uTimeout;
+                pWait->uNsAbsTimeout = u64Now + uTimeout;
+                if (pWait->uNsAbsTimeout < u64Now) /* overflow */
+                    fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+            }
+            else
+            {
+                u64Now = RTTimeSystemNanoTS();
+                if (u64Now >= uTimeout)
+                    return VERR_TIMEOUT;
+
+                pWait->cNsRelTimeout = uTimeout - u64Now;
+                pWait->uNsAbsTimeout = uTimeout;
+            }
+        }
+    }
+
+    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
+    {
+        pWait->fIndefinite = false;
+        if (   (   (fFlags & (RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_ABSOLUTE))
+                || pWait->cNsRelTimeout < UINT32_C(1000000000) / 100 /*Hz*/ * 4)
+#ifdef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+            && g_pfnrtR0Sol_timeout_generic != NULL
+#endif
+           )
+            pWait->fHighRes = true;
+        else
+        {
+            /* Low resolution: convert the *relative* timeout to clock ticks.
+               (Using uTimeout here would be wrong when the ABSOLUTE flag is
+               set and timeout_generic is unavailable, since uTimeout then
+               holds an absolute timestamp, not a duration.) */
+            uint64_t cTicks = NSEC_TO_TICK_ROUNDUP(pWait->cNsRelTimeout);
+            if (cTicks >= LONG_MAX)
+                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
+            else
+            {
+                pWait->u.lTimeout = cTicks;
+                pWait->fHighRes = false;
+            }
+        }
+    }
+
+    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
+    {
+        /* No deadline: park all timeout fields at their maximum values. */
+        pWait->fIndefinite   = true;
+        pWait->fHighRes      = false;
+        pWait->uNsAbsTimeout = UINT64_MAX;
+        pWait->cNsRelTimeout = UINT64_MAX;
+        pWait->u.lTimeout    = LONG_MAX;
+    }
+
+    pWait->fWantWakeup    = false;
+    pWait->fTimedOut      = false;
+    pWait->fInterrupted   = false;
+    pWait->fInterruptible = !!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
+    pWait->pThread        = curthread;
+    pWait->pvMtx          = NULL;
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+    pWait->idCy           = CYCLIC_NONE;
+#endif
+
+    return VINF_SUCCESS;
+}
+
+
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+/**
+ * Cyclic timeout callback that sets the timeout indicator and wakes up the
+ * waiting thread.
+ *
+ * NOTE: compiled out at present, since RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+ * is defined above (old S10 cyclic fallback disabled, see @bugref{5342}).
+ *
+ * @param   pvUser              The wait structure.
+ */
+static void rtR0SemSolWaitHighResTimeout(void *pvUser)
+{
+    PRTR0SEMSOLWAIT pWait   = (PRTR0SEMSOLWAIT)pvUser;
+    kthread_t      *pThread = pWait->pThread;
+    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr(&pWait->pvMtx);
+    if (VALID_PTR(pMtx))
+    {
+        /* Enter the mutex here to make sure the thread has gone to sleep
+           before we wake it up.
+           Note: Trying to take the cpu_lock here doesn't work. */
+        mutex_enter(pMtx);
+        if (mutex_owner(&cpu_lock) == curthread)
+        {
+            cyclic_remove(pWait->idCy);
+            pWait->idCy = CYCLIC_NONE;
+        }
+        /* Only setrun() once the waiter has signalled readiness; avoids
+           spurious wakeups during temporary mutex exits. */
+        bool const fWantWakeup = pWait->fWantWakeup;
+        ASMAtomicWriteBool(&pWait->fTimedOut, true);
+        mutex_exit(pMtx);
+
+        if (fWantWakeup)
+            setrun(pThread);
+    }
+}
+#endif
+
+
+/**
+ * Timeout callback that sets the timeout indicator and wakes up the waiting
+ * thread.
+ *
+ * Runs from the realtime_timeout / timeout_generic callout; pvMtx is NULL
+ * whenever no wait is in progress, which makes the callback a no-op then.
+ *
+ * @param   pvUser              The wait structure.
+ */
+static void rtR0SemSolWaitTimeout(void *pvUser)
+{
+    PRTR0SEMSOLWAIT pWait   = (PRTR0SEMSOLWAIT)pvUser;
+    kthread_t      *pThread = pWait->pThread;
+    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr((void * volatile *)&pWait->pvMtx);
+    if (VALID_PTR(pMtx))
+    {
+        /* Enter the mutex here to make sure the thread has gone to sleep
+           before we wake it up. */
+        mutex_enter(pMtx);
+        /* Only setrun() once the waiter has signalled readiness; avoids
+           spurious wakeups during temporary mutex exits. */
+        bool const fWantWakeup = pWait->fWantWakeup;
+        ASMAtomicWriteBool(&pWait->fTimedOut, true);
+        mutex_exit(pMtx);
+
+        if (fWantWakeup)
+            setrun(pThread);
+    }
+}
+
+
+/**
+ * Do the actual wait.
+ *
+ * @param   pWait           The wait structure.
+ * @param   pCnd            The condition variable to wait on.
+ * @param   pMtx            The mutex related to the condition variable.
+ *                          The caller has entered this.
+ * @param   pfState         The state variable to check if have changed
+ *                          after leaving the mutex (spinlock).
+ * @param   fCurState       The current value of @a pfState.  We'll return
+ *                          without sleeping if @a pfState doesn't hold
+ *                          this value after reacquiring the mutex.
+ *
+ * @remarks This must be called with the object mutex (spinlock) held.
+ */
+DECLINLINE(void) rtR0SemSolWaitDoIt(PRTR0SEMSOLWAIT pWait, kcondvar_t *pCnd, kmutex_t *pMtx,
+                                    uint32_t volatile *pfState, uint32_t const fCurState)
+{
+    union
+    {
+        callout_id_t    idCo;   /* used with timeout_generic (high res) */
+        timeout_id_t    idTom;  /* used with realtime_timeout (low res) */
+    } u;
+
+    /*
+     * Arm the timeout callback.
+     *
+     * We will have to leave the mutex (spinlock) when doing this because S10
+     * (didn't check S11) will not correctly preserve PIL across calls to
+     * timeout_generic() - @bugref{5595}.  We do it for all timeout methods to
+     * be on the safe side, the nice sideeffect of which is that it solves the
+     * lock inversion problem found in @bugref{5342}.
+     */
+    bool const fHasTimeout = !pWait->fIndefinite;
+    bool       fGoToSleep  = !fHasTimeout;
+    if (fHasTimeout)
+    {
+        /* Publish the mutex for the timeout callback, but do not allow it to
+           setrun() us yet (fWantWakeup is still false). */
+        pWait->fWantWakeup = false; /* only want fTimedOut */
+        ASMAtomicWritePtr(&pWait->pvMtx, pMtx); /* atomic is paranoia */
+        mutex_exit(pMtx);
+
+        if (pWait->fHighRes)
+        {
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+            if (g_pfnrtR0Sol_timeout_generic != NULL)
+#endif
+            {
+                /*
+                 * High resolution timeout - arm a high resolution timeout callback
+                 * for waking up the thread at the desired time.
+                 */
+                u.idCo = g_pfnrtR0Sol_timeout_generic(CALLOUT_REALTIME, rtR0SemSolWaitTimeout, pWait,
+                                                      pWait->uNsAbsTimeout, RTR0SEMSOLWAIT_RESOLUTION,
+                                                      CALLOUT_FLAG_ABSOLUTE);
+            }
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+            else
+            {
+                /*
+                 * High resolution timeout - arm a one-shot cyclic for waking up
+                 * the thread at the desired time.
+                 */
+                cyc_handler_t   Cyh;
+                Cyh.cyh_arg      = pWait;
+                Cyh.cyh_func     = rtR0SemSolWaitHighResTimeout;
+                Cyh.cyh_level    = CY_LOW_LEVEL; /// @todo try CY_LOCK_LEVEL and CY_HIGH_LEVEL?
+
+                cyc_time_t      Cyt;
+                Cyt.cyt_when     = pWait->uNsAbsTimeout;
+                Cyt.cyt_interval = UINT64_C(1000000000) * 60;
+
+                mutex_enter(&cpu_lock);
+                pWait->idCy = cyclic_add(&Cyh, &Cyt);
+                mutex_exit(&cpu_lock);
+            }
+#endif
+        }
+        else
+        {
+            /*
+             * Normal timeout.
+             * We're better off with our own callback like on the timeout man page,
+             * than calling cv_timedwait[_sig]().
+             */
+            u.idTom = realtime_timeout(rtR0SemSolWaitTimeout, pWait, pWait->u.lTimeout);
+        }
+
+        /*
+         * Reacquire the mutex and check if the sleep condition still holds and
+         * that we didn't already time out.
+         */
+        mutex_enter(pMtx);
+        pWait->fWantWakeup = true;
+        fGoToSleep = !ASMAtomicUoReadBool(&pWait->fTimedOut)
+                  && ASMAtomicReadU32(pfState) == fCurState;
+    }
+
+    /*
+     * Do the waiting if that's still desirable.
+     * (rc > 0 - normal wake-up; rc == 0 - interruption; rc == -1 - timeout)
+     */
+    if (fGoToSleep)
+    {
+        if (pWait->fInterruptible)
+        {
+            int rc = cv_wait_sig(pCnd, pMtx);
+            if (RT_UNLIKELY(rc <= 0))
+            {
+                if (RT_LIKELY(rc == 0))
+                    pWait->fInterrupted = true;
+                else
+                    AssertMsgFailed(("rc=%d\n", rc)); /* no timeouts, see above! */
+            }
+        }
+        else
+            cv_wait(pCnd, pMtx);
+    }
+
+    /*
+     * Remove the timeout callback.  Drop the lock while we're doing that
+     * to reduce lock contention / deadlocks.  Before dropping the lock,
+     * indicate that the callback shouldn't do anything.
+     *
+     * (Too bad we are stuck with the cv_* API here, it's doing a little
+     * bit too much.)
+     */
+    if (fHasTimeout)
+    {
+        /* Clearing pvMtx neuters a concurrently firing rtR0SemSolWaitTimeout. */
+        pWait->fWantWakeup = false;
+        ASMAtomicWritePtr(&pWait->pvMtx, NULL);
+        mutex_exit(pMtx);
+
+        if (pWait->fHighRes)
+        {
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+            if (g_pfnrtR0Sol_timeout_generic != NULL)
+#endif
+                g_pfnrtR0Sol_untimeout_generic(u.idCo, 0 /*nowait*/);
+#ifndef RTR0SEMSOLWAIT_NO_OLD_S10_FALLBACK
+            else
+            {
+                mutex_enter(&cpu_lock);
+                if (pWait->idCy != CYCLIC_NONE)
+                {
+                    cyclic_remove(pWait->idCy);
+                    pWait->idCy = CYCLIC_NONE;
+                }
+                mutex_exit(&cpu_lock);
+            }
+#endif
+        }
+        else
+            untimeout(u.idTom);
+
+        mutex_enter(pMtx);
+    }
+}
+
+
+/**
+ * Checks if a solaris wait was interrupted.
+ *
+ * @returns true / false
+ * @param   pWait           The wait structure.
+ * @remarks This shall be called before the first rtR0SemSolWaitDoIt().
+ */
+DECLINLINE(bool) rtR0SemSolWaitWasInterrupted(PRTR0SEMSOLWAIT pWait)
+{
+    /* Set by rtR0SemSolWaitDoIt when cv_wait_sig() returns 0. */
+    return pWait->fInterrupted;
+}
+
+
+/**
+ * Checks if a solaris wait has timed out.
+ *
+ * @returns true / false
+ * @param   pWait           The wait structure.
+ */
+DECLINLINE(bool) rtR0SemSolWaitHasTimedOut(PRTR0SEMSOLWAIT pWait)
+{
+    /* Set by the timeout callbacks (rtR0SemSolWaitTimeout and friends). */
+    return pWait->fTimedOut;
+}
+
+
+/**
+ * Deletes a solaris wait.
+ *
+ * Only clears the thread pointer; the timeout callbacks have already been
+ * disarmed by rtR0SemSolWaitDoIt before it returns.
+ *
+ * @param   pWait           The wait structure.
+ */
+DECLINLINE(void) rtR0SemSolWaitDelete(PRTR0SEMSOLWAIT pWait)
+{
+    pWait->pThread = NULL;
+}
+
+
+/**
+ * Enters the mutex, unpinning the underlying current thread if contended and
+ * we're on an interrupt thread.
+ *
+ * The unpinning is done to prevent a deadlock; see @bugref{4259} for the
+ * full explanation.
+ *
+ * @param   pMtx            The mutex to enter.
+ */
+DECLINLINE(void) rtR0SemSolWaitEnterMutexWithUnpinningHack(kmutex_t *pMtx)
+{
+    int fAcquired = mutex_tryenter(pMtx);
+    if (!fAcquired)
+    {
+        /*
+         * Note! This assumes nobody is using the RTThreadPreemptDisable() in an
+         *       interrupt context and expects it to work right.  The swtch will
+         *       result in a voluntary preemption.  To fix this, we would have to
+         *       do our own counting in RTThreadPreemptDisable/Restore() like we do
+         *       on systems which doesn't do preemption (OS/2, linux, ...) and
+         *       check whether preemption was disabled via RTThreadPreemptDisable()
+         *       or not and only call swtch if RTThreadPreemptDisable() wasn't called.
+         *
+         * NOTE(review): the comment above mentions swtch while the code calls
+         * preempt() - presumably preempt() reaches swtch internally; confirm.
+         */
+        kthread_t **ppIntrThread = SOL_THREAD_TINTR_PTR;
+        if (   *ppIntrThread
+            && getpil() < DISP_LEVEL)
+        {
+            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+            RTThreadPreemptDisable(&PreemptState);
+            preempt();
+            RTThreadPreemptRestore(&PreemptState);
+        }
+        mutex_enter(pMtx);
+    }
+}
+
+
+/**
+ * Gets the max resolution of the timeout machinery.
+ *
+ * @returns Resolution specified in nanoseconds.
+ */
+DECLINLINE(uint32_t) rtR0SemSolWaitGetResolution(void)
+{
+    /* timeout_generic available => our fixed high-res resolution,
+       otherwise whatever the cyclic subsystem reports. */
+    return g_pfnrtR0Sol_timeout_generic != NULL
+         ? RTR0SEMSOLWAIT_RESOLUTION
+         : cyclic_getres();
+}
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_solaris_semeventwait_r0drv_solaris_h */
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c
new file mode 100644
index 00000000..4b161294
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semfastmutex-r0drv-solaris.c
@@ -0,0 +1,120 @@
+/* $Id: semfastmutex-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Fast Mutex Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * Wrapper for the Solaris rwlock (krwlock_t), used write-only so it
+ * behaves as a mutex.
+ */
+typedef struct RTSEMFASTMUTEXINTERNAL
+{
+    /** Magic value (RTSEMFASTMUTEX_MAGIC). */
+    uint32_t            u32Magic;
+    /** The Solaris read-write lock, always taken as RW_WRITER. */
+    krwlock_t           Mtx;
+} RTSEMFASTMUTEXINTERNAL, *PRTSEMFASTMUTEXINTERNAL;
+
+
+
+/**
+ * Creates a fast mutex (backed by a Solaris rwlock taken in write mode).
+ */
+RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis;
+
+    /* Guard against the structure shrinking to pointer size (handle aliasing). */
+    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
+    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
+    RT_ASSERT_PREEMPTIBLE();
+
+    pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+    if (!pThis)
+        return VERR_NO_MEMORY;
+
+    pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
+    rw_init(&pThis->Mtx, "RWLOCK", RW_DRIVER, NULL);
+
+    *phFastMtx = pThis;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Destroys a fast mutex.  NIL handles are tolerated; the magic is marked
+ * dead before the rwlock is torn down and the memory freed.
+ */
+RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    if (pThis == NIL_RTSEMFASTMUTEX)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+
+    ASMAtomicXchgU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
+    rw_destroy(&pThis->Mtx);
+    RTMemFree(pThis);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Acquires the fast mutex (blocking; takes the rwlock as writer).
+ */
+RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_PREEMPTIBLE();
+
+    rw_enter(&pThis->Mtx, RW_WRITER);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Releases the fast mutex (drops the writer hold on the rwlock).
+ */
+RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
+{
+    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
+    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
+    RT_ASSERT_INTS_ON();
+
+    rw_exit(&pThis->Mtx);
+    return VINF_SUCCESS;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c
new file mode 100644
index 00000000..53194ab0
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c
@@ -0,0 +1,387 @@
+/* $Id: semmutex-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Mutex Semaphores, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTSEMMUTEX_WITHOUT_REMAPPING
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/semaphore.h>
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/mem.h>
+#include <iprt/err.h>
+#include <iprt/list.h>
+#include <iprt/thread.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * Solaris mutex semaphore: a kmutex_t/kcondvar_t pair with recursion support
 * and reference counting so that sleeping waiters can outlive destruction
 * (the last reference performs the actual cleanup).
 */
typedef struct RTSEMMUTEXINTERNAL
{
    /** Magic value (RTSEMMUTEX_MAGIC); incremented by RTSemMutexDestroy to
     * signal destruction to sleeping waiters. */
    uint32_t u32Magic;
    /** The number of recursions by the owner thread. */
    uint32_t cRecursions;
    /** The number of threads waiting for the mutex. */
    uint32_t volatile cWaiters;
    /** The number of threads referencing us (1 from creation, +1 per waiter). */
    uint32_t volatile cRefs;
    /** The owner thread, NIL_RTNATIVETHREAD if none. */
    RTNATIVETHREAD hOwnerThread;
    /** The mutex object for synchronization. */
    kmutex_t Mtx;
    /** The condition variable for synchronization. */
    kcondvar_t Cnd;
} RTSEMMUTEXINTERNAL, *PRTSEMMUTEXINTERNAL;
+
+
+RTDECL(int) RTSemMutexCreate(PRTSEMMUTEX phMtx)
+{
+ /*
+ * Allocate.
+ */
+ PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (RT_UNLIKELY(!pThis))
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize.
+ */
+ pThis->u32Magic = RTSEMMUTEX_MAGIC;
+ pThis->cRecursions = 0;
+ pThis->cWaiters = 0;
+ pThis->cRefs = 1;
+ pThis->hOwnerThread = NIL_RTNATIVETHREAD;
+ mutex_init(&pThis->Mtx, "IPRT Mutex", MUTEX_DRIVER, (void *)ipltospl(DISP_LEVEL));
+ cv_init(&pThis->Cnd, "IPRT CVM", CV_DRIVER, NULL);
+ *phMtx = pThis;
+ return VINF_SUCCESS;
+}
+
+
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMtx)
{
    PRTSEMMUTEXINTERNAL pThis = hMtx;

    /*
     * Validate.  A nil handle is silently accepted as a no-op.
     */
    if (pThis == NIL_RTSEMMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);

    mutex_enter(&pThis->Mtx);

    /* Drop the creation reference (waiters each hold their own). */
    ASMAtomicDecU32(&pThis->cRefs);

    /*
     * Invalidate the magic to indicate the mutex is being destroyed;
     * sleeping waiters re-check it on wakeup and bail out.
     */
    ASMAtomicIncU32(&pThis->u32Magic);
    if (pThis->cWaiters > 0)
    {
        /*
         * Wake up all waiters; the last waiter to drop its reference
         * performs the actual cleanup (see rtSemMutexSolRequestSleep).
         */
        cv_broadcast(&pThis->Cnd);
        mutex_exit(&pThis->Mtx);
    }
    else if (pThis->cRefs == 0)
    {
        /*
         * No references left at all -- destroy and free right here.
         */
        mutex_exit(&pThis->Mtx);
        cv_destroy(&pThis->Cnd);
        mutex_destroy(&pThis->Mtx);
        RTMemFree(pThis);
    }
    else
    {
        /*
         * Someone else still holds a reference and will do the cleanup;
         * just relinquish the lock and bail.
         */
        mutex_exit(&pThis->Mtx);
    }

    return VINF_SUCCESS;
}
+
+
/**
 * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
 *          Returns without owning the mutex.
 * @param   pThis           The mutex instance.
 * @param   cMillies        The timeout, must be > 0 or RT_INDEFINITE_WAIT.
 * @param   fInterruptible  The wait type (true = interruptible by signals).
 *
 * @remarks This needs to be called with the mutex object held!
 * @remarks NOTE(review): when the final reference is dropped at the bottom,
 *          this function frees pThis (destroying Mtx) and returns -- yet the
 *          caller rtSemMutexSolRequest still calls mutex_exit() on it.  That
 *          looks like a use-after-free on the destroyed-while-waiting path;
 *          verify against upstream before relying on this code.
 */
static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                     bool fInterruptible)
{
    int rc = VERR_GENERAL_FAILURE;
    Assert(cMillies > 0);

    /*
     * Now we wait (sleep; although might spin and then sleep) & reference the mutex.
     */
    ASMAtomicIncU32(&pThis->cWaiters);
    ASMAtomicIncU32(&pThis->cRefs);

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        /* Convert the relative timeout into an absolute lbolt deadline. */
        clock_t cTicks = drv_usectohz((clock_t)(cMillies * 1000L));
        clock_t cTimeout = ddi_get_lbolt();
        cTimeout += cTicks;
        if (fInterruptible)
            rc = cv_timedwait_sig(&pThis->Cnd, &pThis->Mtx, cTimeout);
        else
            rc = cv_timedwait(&pThis->Cnd, &pThis->Mtx, cTimeout);
    }
    else
    {
        if (fInterruptible)
            rc = cv_wait_sig(&pThis->Cnd, &pThis->Mtx);
        else
        {
            /* cv_wait() has no useful return value; fake "woken up normally". */
            cv_wait(&pThis->Cnd, &pThis->Mtx);
            rc = 1;
        }
    }

    /* Here: rc > 0 = woken up, rc == -1 = timed out, rc == 0 = signal pending. */
    ASMAtomicDecU32(&pThis->cWaiters);
    if (rc > 0)
    {
        if (pThis->u32Magic == RTSEMMUTEX_MAGIC)
        {
            if (pThis->hOwnerThread == NIL_RTNATIVETHREAD)
            {
                /*
                 * Woken up by a release from another thread.
                 */
                Assert(pThis->cRecursions == 0);
                pThis->cRecursions = 1;
                pThis->hOwnerThread = RTThreadNativeSelf();
                rc = VINF_SUCCESS;
            }
            else
            {
                /*
                 * Interrupted by some signal.
                 * NOTE(review): rc > 0 normally indicates a plain wakeup; this
                 * branch (mutex already claimed by someone else) presumably
                 * covers wakeup races -- confirm VERR_INTERRUPTED is the
                 * intended status here.
                 */
                rc = VERR_INTERRUPTED;
            }
        }
        else
        {
            /*
             * Awakened due to the destruction-in-progress broadcast.
             * We will cleanup if we're the last waiter.
             */
            rc = VERR_SEM_DESTROYED;
        }
    }
    else if (rc == -1)
    {
        /*
         * Timed out.
         */
        rc = VERR_TIMEOUT;
    }
    else
    {
        /*
         * Condition may not have been met, returned due to pending signal.
         */
        rc = VERR_INTERRUPTED;
    }

    /*
     * Drop our reference; if it was the last one (destruction raced us),
     * do the full cleanup here.  See the NOTE(review) in the header above.
     */
    if (!ASMAtomicDecU32(&pThis->cRefs))
    {
        Assert(RT_FAILURE_NP(rc));
        mutex_exit(&pThis->Mtx);
        cv_destroy(&pThis->Cnd);
        mutex_destroy(&pThis->Mtx);
        RTMemFree(pThis);
        return rc;
    }

    return rc;
}
+
+
/**
 * Internal worker for RTSemMutexRequest / RTSemMutexRequestNoResume.
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED, VERR_SEM_DESTROYED
 *          or VERR_INVALID_HANDLE.
 * @param   hMutexSem       The mutex handle.
 * @param   cMillies        Timeout in ms; 0 = poll, RT_INDEFINITE_WAIT = forever.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
DECLINLINE(int) rtSemMutexSolRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
{
    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
    int rc = VERR_GENERAL_FAILURE;

    /*
     * Validate.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
    Assert(pThis->cRefs >= 1);

    /*
     * Lock it and check if it's a recursion.
     */
    mutex_enter(&pThis->Mtx);
    if (pThis->hOwnerThread == RTThreadNativeSelf())
    {
        pThis->cRecursions++;
        Assert(pThis->cRecursions > 1);
        Assert(pThis->cRecursions < 256);
        rc = VINF_SUCCESS;
    }
    /*
     * Not a recursion; claim the unowned mutex if there are no waiters
     * (waiters have priority so they are not starved).
     */
    else if (   pThis->hOwnerThread == NIL_RTNATIVETHREAD
             && pThis->cWaiters == 0)
    {
        pThis->cRecursions = 1;
        pThis->hOwnerThread = RTThreadNativeSelf();
        rc = VINF_SUCCESS;
    }
    /*
     * A polling call?
     */
    else if (cMillies == 0)
        rc = VERR_TIMEOUT;
    /*
     * No, we really need to get to sleep.
     */
    else
        rc = rtSemMutexSolRequestSleep(pThis, cMillies, fInterruptible);

    /*
     * NOTE(review): if rtSemMutexSolRequestSleep dropped the last reference
     * it already freed pThis and destroyed Mtx; the mutex_exit below would
     * then touch freed memory.  Verify this path against upstream.
     */
    mutex_exit(&pThis->Mtx);
    return rc;
}
+
+
+RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexSolRequest(hMutexSem, cMillies, false /*fInterruptible*/);
+}
+
+
+RTDECL(int) RTSemMutexRequestDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequest(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
+{
+ return rtSemMutexSolRequest(hMutexSem, cMillies, true /*fInterruptible*/);
+}
+
+
+RTDECL(int) RTSemMutexRequestNoResumeDebug(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
+{
+ return RTSemMutexRequestNoResume(hMutexSem, cMillies);
+}
+
+
+RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMtx)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMtx;
+ int rc;
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
+
+ /*
+ * Take the lock and release one recursion.
+ */
+ mutex_enter(&pThis->Mtx);
+ if (pThis->hOwnerThread == RTThreadNativeSelf())
+ {
+ Assert(pThis->cRecursions > 0);
+ if (--pThis->cRecursions == 0)
+ {
+ pThis->hOwnerThread = NIL_RTNATIVETHREAD;
+
+ /*
+ * If there are any waiters, signal one of them.
+ */
+ if (pThis->cWaiters > 0)
+ cv_signal(&pThis->Cnd);
+ }
+ rc = VINF_SUCCESS;
+ }
+ else
+ rc = VERR_NOT_OWNER;
+
+ mutex_exit(&pThis->Mtx);
+ return rc;
+}
+
+
+RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
+{
+ PRTSEMMUTEXINTERNAL pThis = hMutexSem;
+ bool fOwned = false;
+
+ /*
+ * Validate.
+ */
+ AssertPtrReturn(pThis, false);
+ AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), false);
+
+ /*
+ * Check if this is the owner.
+ */
+ mutex_enter(&pThis->Mtx);
+ fOwned = pThis->hOwnerThread != NIL_RTNATIVETHREAD;
+ mutex_exit(&pThis->Mtx);
+
+ return fOwned;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c
new file mode 100644
index 00000000..6fe758ed
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/spinlock-r0drv-solaris.c
@@ -0,0 +1,204 @@
+/* $Id: spinlock-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Spinlocks, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/spinlock.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
/**
 * Solaris spinlock wrapper: a spin-type kmutex_t (MUTEX_SPIN, see
 * RTSpinlockCreate) plus the saved interrupt flags for the
 * RTSPINLOCK_FLAGS_INTERRUPT_SAFE variant.
 */
typedef struct RTSPINLOCKINTERNAL
{
    /** Spinlock magic value (RTSPINLOCK_MAGIC); incremented on destruction. */
    uint32_t volatile u32Magic;
    /** Spinlock creation flags (RTSPINLOCK_FLAGS_INTERRUPT_SAFE/_UNSAFE). */
    uint32_t fFlags;
    /** Saved interrupt flag (x86/amd64 only; set while the lock is held). */
    uint32_t volatile fIntSaved;
    /** A Solaris spinlock. */
    kmutex_t Mtx;
#ifdef RT_MORE_STRICT
    /** The idAssertCpu variable before acquring the lock for asserting after
     * releasing the spinlock. */
    RTCPUID volatile idAssertCpu;
    /** The CPU that owns the lock. */
    RTCPUID volatile idCpuOwner;
#endif
} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;
+
+
+
+RTDECL(int) RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
+{
+ RT_ASSERT_PREEMPTIBLE();
+ AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, VERR_INVALID_PARAMETER);
+
+ /*
+ * Allocate.
+ */
+ AssertCompile(sizeof(RTSPINLOCKINTERNAL) > sizeof(void *));
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Initialize & return.
+ */
+ pThis->u32Magic = RTSPINLOCK_MAGIC;
+ pThis->fFlags = fFlags;
+ pThis->fIntSaved = 0;
+ /** @todo Consider different PIL when not interrupt safe requirement. */
+ mutex_init(&pThis->Mtx, "IPRT Spinlock", MUTEX_SPIN, (void *)ipltospl(PIL_MAX));
+ *pSpinlock = pThis;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
+{
+ /*
+ * Validate input.
+ */
+ RT_ASSERT_INTS_ON();
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ if (!pThis)
+ return VERR_INVALID_PARAMETER;
+ AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
+ ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Make the lock invalid and release the memory.
+ */
+ ASMAtomicIncU32(&pThis->u32Magic);
+ mutex_destroy(&pThis->Mtx);
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
+{
+ PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
+ RT_ASSERT_PREEMPT_CPUID_VAR();
+ AssertPtr(pThis);
+ Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
+
+ if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
+ {
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ uint32_t fIntSaved = ASMIntDisableFlags();
+#endif
+ mutex_enter(&pThis->Mtx);
+
+ /*
+ * Solaris 10 doesn't preserve the interrupt flag, but since we're at PIL_MAX we should be
+ * fine and not get interrupts while lock is held. Re-disable interrupts to not upset
+ * assertions & assumptions callers might have.
+ */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ ASMIntDisable();
+#endif
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ Assert(!ASMIntAreEnabled());
+#endif
+ pThis->fIntSaved = fIntSaved;
+ }
+ else
+ {
+#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+ bool fIntsOn = ASMIntAreEnabled();
+#endif
+
+ mutex_enter(&pThis->Mtx);
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
+#endif
+ }
+
+ RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
+}
+
+
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();

    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /* Fetch the saved interrupt flags (stored by RTSpinlockAcquire)
           before dropping the lock, then restore them afterwards. */
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#endif
        mutex_exit(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else
    {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        bool fIntsOn = ASMIntAreEnabled();
#endif

        mutex_exit(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /* Releasing an interrupt-unsafe spinlock must not change the interrupt flag. */
        AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
#endif
    }

    RT_ASSERT_PREEMPT_CPUID();
}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h b/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h
new file mode 100644
index 00000000..e844beed
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h
@@ -0,0 +1,216 @@
+/* $Id: the-solaris-kernel.h $ */
+/** @file
+ * IPRT - Include all necessary headers for the Solaris kernel.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_SRC_r0drv_solaris_the_solaris_kernel_h
+#define IPRT_INCLUDED_SRC_r0drv_solaris_the_solaris_kernel_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/thread.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/sdt.h>
+#include <sys/schedctl.h>
+#include <sys/time.h>
+#include <sys/sysmacros.h>
+#include <sys/cmn_err.h>
+#include <sys/vmsystm.h>
+#include <sys/cyclic.h>
+#include <sys/class.h>
+#include <sys/cpuvar.h>
+#include <sys/archsystm.h>
+#include <sys/x_call.h> /* in platform dir */
+#include <sys/x86_archext.h>
+#include <vm/hat.h>
+#include <vm/seg_vn.h>
+#include <vm/seg_kmem.h>
+#include <vm/page.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/spl.h>
+#include <sys/archsystm.h>
+#include <sys/callo.h>
+#include <sys/kobj.h>
+#include <sys/ctf_api.h>
+#include <sys/modctl.h>
+#include <sys/proc.h>
+#include <sys/t_lock.h>
+
+#undef u /* /usr/include/sys/user.h:249:1 is where this is defined to (curproc->p_user). very cool. */
+
+#include <iprt/cdefs.h>
+#include <iprt/types.h>
+#include <iprt/dbg.h>
+
+RT_C_DECLS_BEGIN
+
+/* IPRT functions. */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t cbPhysHi, uint64_t *puPhys, size_t cb, uint64_t cbAlign, bool fContig);
+DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb);
+
+
+/* Solaris functions. */
+typedef callout_id_t (*PFNSOL_timeout_generic)(int type, void (*func)(void *),
+ void *arg, hrtime_t expiration,
+ hrtime_t resultion, int flags);
+typedef hrtime_t (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
+typedef int (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
+typedef void (*PFNSOL_contig_free)(void *addr, size_t size);
+typedef int (*PFNSOL_page_noreloc_supported)(size_t cbPageSize);
+
+/* IPRT globals. */
+extern bool g_frtSolSplSetsEIF;
+extern RTCPUSET g_rtMpSolCpuSet;
+extern PFNSOL_timeout_generic g_pfnrtR0Sol_timeout_generic;
+extern PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic;
+extern PFNSOL_cyclic_reprogram g_pfnrtR0Sol_cyclic_reprogram;
+extern PFNSOL_contig_free g_pfnrtR0Sol_contig_free;
+extern PFNSOL_page_noreloc_supported g_pfnrtR0Sol_page_noreloc_supported;
+extern size_t g_offrtSolThreadPreempt;
+extern size_t g_offrtSolThreadIntrThread;
+extern size_t g_offrtSolThreadLock;
+extern size_t g_offrtSolThreadProc;
+extern size_t g_offrtSolThreadId;
+extern size_t g_offrtSolCpuPreempt;
+extern size_t g_offrtSolCpuForceKernelPreempt;
+extern bool g_frtSolInitDone;
+extern RTDBGKRNLINFO g_hKrnlDbgInfo;
+
+/*
+ * Workarounds for running on old versions of solaris with different cross call
+ * interfaces. If we find xc_init_cpu() in the kernel, then just use the
+ * defined interfaces for xc_call() from the include file where the xc_call()
+ * interface just takes a pointer to a ulong_t array. The array must be long
+ * enough to hold "ncpus" bits at runtime.
+ *
+ * The reason for the hacks is that using the type "cpuset_t" is pretty much
+ * impossible from code built outside the Solaris source repository that wants
+ * to run on multiple releases of Solaris.
+ *
+ * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
+ * "ulong_t" as cpuset_t.
+ *
+ * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
+ * where "x" depends on NCPU.
+ *
+ * We detect the difference in 64 bit support by checking the kernel value of
+ * max_cpuid, which always holds the compiled value of NCPU - 1.
+ *
+ * If Solaris increases NCPU to more than 256, VBox will continue to work on
+ * all versions of Solaris as long as the number of installed CPUs in the
+ * machine is <= IPRT_SOLARIS_NCPUS. If IPRT_SOLARIS_NCPUS is increased, this
+ * code has to be re-written some to provide compatibility with older Solaris
+ * which expects cpuset_t to be based on NCPU==256 -- or we discontinue
+ * support of old Nevada/S10.
+ */
/** Number of CPUs the compatibility cpuset is sized for (NCPU on old kernels,
 * see the discussion above). */
#define IPRT_SOL_NCPUS 256
/** Number of ulong_t words needed to hold IPRT_SOL_NCPUS bits. */
#define IPRT_SOL_SET_WORDS (IPRT_SOL_NCPUS / (sizeof(ulong_t) * 8))
/** High-priority cross call constant for the old Solaris xc_call() interface. */
#define IPRT_SOL_X_CALL_HIPRI (2) /* for Old Solaris interface */
/**
 * Fixed-size stand-in for the kernel's cpuset_t (see the comment above).
 */
typedef struct RTSOLCPUSET
{
    /** One bit per CPU, IPRT_SOL_NCPUS bits in total. */
    ulong_t auCpus[IPRT_SOL_SET_WORDS];
} RTSOLCPUSET;
/** Pointer to a compatibility cpuset. */
typedef RTSOLCPUSET *PRTSOLCPUSET;
+
/* Avoid warnings even if it means more typing... */
/**
 * Union of the different xc_call() prototypes (see the workaround notes above).
 */
typedef struct RTR0FNSOLXCCALL
{
    union
    {
        /** Current interface: CPU set passed as a ulong_t bit array. */
        void *(*pfnSol_xc_call) (xc_arg_t, xc_arg_t, xc_arg_t, ulong_t *, xc_func_t);
        /** Old interface: CPU set passed by value as RTSOLCPUSET. */
        void *(*pfnSol_xc_call_old) (xc_arg_t, xc_arg_t, xc_arg_t, int, RTSOLCPUSET, xc_func_t);
        /** Old interface variant: CPU set passed as a single ulong_t. */
        void *(*pfnSol_xc_call_old_ulong)(xc_arg_t, xc_arg_t, xc_arg_t, int, ulong_t, xc_func_t);
    } u;
} RTR0FNSOLXCCALL;
typedef RTR0FNSOLXCCALL *PRTR0FNSOLXCCALL;
+
+extern RTR0FNSOLXCCALL g_rtSolXcCall;
+extern bool g_frtSolOldIPI;
+extern bool g_frtSolOldIPIUlong;
+
/*
 * Thread-context hooks.
 * Workarounds for older Solaris versions that did not have the exitctx() callback.
 */
/**
 * installctx()/removectx() function pointers: the new prototypes take an
 * additional pfnExit callback that the old ones lack.
 */
typedef struct RTR0FNSOLTHREADCTX
{
    union
    {
        /** New installctx() interface (with pfnExit). */
        void *(*pfnSol_installctx) (kthread_t *pThread, void *pvArg,
                                    void (*pfnSave)(void *pvArg),
                                    void (*pfnRestore)(void *pvArg),
                                    void (*pfnFork)(void *pvThread, void *pvThreadFork),
                                    void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
                                    void (*pfnExit)(void *pvThread),
                                    void (*pfnFree)(void *pvArg, int fIsExec));

        /** Old installctx() interface (no pfnExit). */
        void *(*pfnSol_installctx_old) (kthread_t *pThread, void *pvArg,
                                        void (*pfnSave)(void *pvArg),
                                        void (*pfnRestore)(void *pvArg),
                                        void (*pfnFork)(void *pvThread, void *pvThreadFork),
                                        void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
                                        void (*pfnFree)(void *pvArg, int fIsExec));
    } Install;

    union
    {
        /** New removectx() interface (with pfnExit). */
        int (*pfnSol_removectx) (kthread_t *pThread, void *pvArg,
                                 void (*pfnSave)(void *pvArg),
                                 void (*pfnRestore)(void *pvArg),
                                 void (*pfnFork)(void *pvThread, void *pvThreadFork),
                                 void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
                                 void (*pfnExit)(void *pvThread),
                                 void (*pfnFree)(void *pvArg, int fIsExec));

        /** Old removectx() interface (no pfnExit). */
        int (*pfnSol_removectx_old) (kthread_t *pThread, void *pvArg,
                                     void (*pfnSave)(void *pvArg),
                                     void (*pfnRestore)(void *pvArg),
                                     void (*pfnFork)(void *pvThread, void *pvThreadFork),
                                     void (*pfnLwpCreate)(void *pvThread, void *pvThreadCreate),
                                     void (*pfnFree)(void *pvArg, int fIsExec));
    } Remove;
} RTR0FNSOLTHREADCTX;
typedef RTR0FNSOLTHREADCTX *PRTR0FNSOLTHREADCTX;
+
+extern RTR0FNSOLTHREADCTX g_rtSolThreadCtx;
+extern bool g_frtSolOldThreadCtx;
+
+/* Solaris globals. */
+extern uintptr_t kernelbase;
+
+/* Misc stuff from newer kernels. */
+#ifndef CALLOUT_FLAG_ABSOLUTE
+# define CALLOUT_FLAG_ABSOLUTE 2
+#endif
+
+RT_C_DECLS_END
+
+#endif /* !IPRT_INCLUDED_SRC_r0drv_solaris_the_solaris_kernel_h */
+
diff --git a/src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c
new file mode 100644
index 00000000..c4650db6
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/thread-r0drv-solaris.c
@@ -0,0 +1,185 @@
+/* $Id: thread-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Threads, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include <iprt/mp.h>
+
/** The current thread's preemption-disable counter; 0 means preemptible.
 * (The field offset g_offrtSolThreadPreempt is resolved at init time.) */
#define SOL_THREAD_PREEMPT (*((char *)curthread + g_offrtSolThreadPreempt))
/** The current CPU's "reschedule requested" flag. */
#define SOL_CPU_RUNRUN (*((char *)CPU + g_offrtSolCpuPreempt))
/** The current CPU's "forced kernel preemption" flag. */
#define SOL_CPU_KPRUNRUN (*((char *)CPU + g_offrtSolCpuForceKernelPreempt))
+
+RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
+{
+ return (RTNATIVETHREAD)curthread;
+}
+
+
+static int rtR0ThreadSolSleepCommon(RTMSINTERVAL cMillies)
+{
+ clock_t cTicks;
+ RT_ASSERT_PREEMPTIBLE();
+
+ if (!cMillies)
+ {
+ RTThreadYield();
+ return VINF_SUCCESS;
+ }
+
+ if (cMillies != RT_INDEFINITE_WAIT)
+ cTicks = drv_usectohz((clock_t)(cMillies * 1000L));
+ else
+ cTicks = 0;
+
+ delay(cTicks);
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadSolSleepCommon(cMillies);
+}
+
+
+RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
+{
+ return rtR0ThreadSolSleepCommon(cMillies);
+}
+
+
RTDECL(bool) RTThreadYield(void)
{
    RT_ASSERT_PREEMPTIBLE();

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* Snapshot the flags while preemption is disabled so they cannot change
       under us; RTThreadPreemptDisable bumped the counter, hence >= 1. */
    char const bThreadPreempt = SOL_THREAD_PREEMPT;
    char const bForcePreempt = SOL_CPU_KPRUNRUN;
    bool fWillYield = false;
    Assert(bThreadPreempt >= 1);

    /*
     * If we are the last preemption enabler for this thread and if force
     * preemption is set on the CPU, only then we are guaranteed to be preempted.
     */
    if (bThreadPreempt == 1 && bForcePreempt != 0)
        fWillYield = true;

    /* RTThreadPreemptRestore performs the actual kpreempt() when pending. */
    RTThreadPreemptRestore(&PreemptState);
    return fWillYield;
}
+
+
+RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+ if (RT_UNLIKELY(g_frtSolInitDone == false))
+ {
+ cmn_err(CE_CONT, "!RTThreadPreemptIsEnabled called before RTR0Init!\n");
+ return true;
+ }
+
+ bool fThreadPreempt = false;
+ if (SOL_THREAD_PREEMPT == 0)
+ fThreadPreempt = true;
+
+ if (!fThreadPreempt)
+ return false;
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+ if (!ASMIntAreEnabled())
+ return false;
+#endif
+ if (getpil() >= DISP_LEVEL)
+ return false;
+ return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+
+ char const bPreempt = SOL_CPU_RUNRUN;
+ char const bForcePreempt = SOL_CPU_KPRUNRUN;
+ return (bPreempt != 0 || bForcePreempt != 0);
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
+{
+ /* yes, RTThreadPreemptIsPending is reliable. */
+ return true;
+}
+
+
+RTDECL(bool) RTThreadPreemptIsPossible(void)
+{
+ /* yes, kernel preemption is possible. */
+ return true;
+}
+
+
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);

    /* Bump the per-thread preemption-disable counter; nesting is allowed. */
    SOL_THREAD_PREEMPT++;
    Assert(SOL_THREAD_PREEMPT >= 1);

    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}
+
+
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);

    /*
     * Drop one nesting level; when the counter reaches zero and the
     * dispatcher has flagged a pending reschedule, preempt right away.
     */
    Assert(SOL_THREAD_PREEMPT >= 1);
    if (--SOL_THREAD_PREEMPT == 0 && SOL_CPU_RUNRUN != 0)
        kpreempt(KPREEMPT_SYNC);
}
+
+
+RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
+{
+ Assert(hThread == NIL_RTTHREAD);
+ return servicing_interrupt() ? true : false;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c
new file mode 100644
index 00000000..2fd81178
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/thread2-r0drv-solaris.c
@@ -0,0 +1,150 @@
+/* $Id: thread2-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Threads (Part 2), Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/thread.h>
+#include <iprt/process.h>
+
+#include <iprt/assert.h>
+#include <iprt/errcore.h>
+#include "internal/thread.h"
+
+/* Accessors for kthread_t fields located via offsets probed at init time;
+   the offsets differ between Solaris releases (S10 vs. S11 layouts). */
+#define SOL_THREAD_ID_PTR       ((uint64_t *)((char *)curthread + g_offrtSolThreadId))
+#define SOL_THREAD_LOCKP_PTR    ((disp_lock_t **)((char *)curthread + g_offrtSolThreadLock))
+
+/**
+ * Native thread subsystem initialization - nothing to do on Solaris.
+ *
+ * @returns VINF_SUCCESS.
+ */
+DECLHIDDEN(int) rtThreadNativeInit(void)
+{
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(RTTHREAD) RTThreadSelf(void)
+{
+    /* Resolve the IPRT thread handle for the calling native thread. */
+    return rtThreadGetByNative(RTThreadNativeSelf());
+}
+
+
+/**
+ * Maps an IPRT thread type to a Solaris priority and applies it to the
+ * calling thread (curthread).
+ *
+ * @returns IPRT status code (VERR_INVALID_PARAMETER for unknown types).
+ * @param   pThread     The IPRT thread structure (not used here; the priority
+ *                      is applied to curthread - presumably the thread itself
+ *                      calls this, TODO confirm).
+ * @param   enmType     The thread type to translate.
+ */
+DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
+{
+    int iPriority;
+    disp_lock_t **ppDispLock;
+    switch (enmType)
+    {
+        case RTTHREADTYPE_INFREQUENT_POLLER:    iPriority = 60;             break;
+        case RTTHREADTYPE_EMULATION:            iPriority = 66;             break;
+        case RTTHREADTYPE_DEFAULT:              iPriority = 72;             break;
+        case RTTHREADTYPE_MSG_PUMP:             iPriority = 78;             break;
+        case RTTHREADTYPE_IO:                   iPriority = 84;             break;
+        case RTTHREADTYPE_TIMER:                iPriority = 99;             break;
+        default:
+            AssertMsgFailed(("enmType=%d\n", enmType));
+            return VERR_INVALID_PARAMETER;
+    }
+
+    Assert(curthread);
+    thread_lock(curthread);
+    thread_change_pri(curthread, iPriority, 0);
+
+    /*
+     * thread_unlock() is a macro calling disp_lock_exit() with the thread's dispatcher lock.
+     * We need to dereference the offset manually here (for S10, S11 compatibility) rather than
+     * using the macro.
+     */
+    ppDispLock = SOL_THREAD_LOCKP_PTR;
+    disp_lock_exit(*ppDispLock);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Adopts an alien (non-IPRT) thread - no native bookkeeping needed on Solaris.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   pThread     The thread structure being adopted (unused).
+ */
+DECLHIDDEN(int) rtThreadNativeAdopt(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+    /* There is nothing special that needs doing here, but the
+       user really better know what he's cooking. */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Waits for the native thread to terminate, joining on the kernel thread id
+ * that rtThreadNativeMain stored in pThread->tid.
+ */
+DECLHIDDEN(void) rtThreadNativeWaitKludge(PRTTHREADINT pThread)
+{
+    thread_join(pThread->tid);
+}
+
+
+/**
+ * Native-side destruction hook - no per-thread native resources to free here.
+ */
+DECLHIDDEN(void) rtThreadNativeDestroy(PRTTHREADINT pThread)
+{
+    NOREF(pThread);
+}
+
+
+/**
+ * Native thread main function.
+ *
+ * Records the kernel thread id (kt_did_t, fetched via the offset probed at
+ * init time) so rtThreadNativeWaitKludge can thread_join() on it, then runs
+ * the common IPRT thread main.
+ *
+ * @param   pvThreadInt     The thread structure.
+ */
+static void rtThreadNativeMain(void *pvThreadInt)
+{
+    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
+
+    AssertCompile(sizeof(kt_did_t) == sizeof(pThreadInt->tid));
+    uint64_t *pu64ThrId = SOL_THREAD_ID_PTR;
+    pThreadInt->tid = *pu64ThrId;
+    rtThreadMain(pThreadInt, RTThreadNativeSelf(), &pThreadInt->szName[0]);
+    /* thread_exit() does not return. */
+    thread_exit();
+}
+
+
+/**
+ * Creates the native kernel thread, attached to process 0 (the kernel), and
+ * starts it immediately at minclsyspri.
+ *
+ * @returns VINF_SUCCESS or VERR_OUT_OF_RESOURCES.
+ * @param   pThreadInt      The thread structure to hand to the new thread.
+ * @param   pNativeThread   Where to store the native thread handle.
+ */
+DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
+{
+    kthread_t *pThread;
+    RT_ASSERT_PREEMPTIBLE();
+
+    /* The real tid is filled in by rtThreadNativeMain once the thread runs. */
+    pThreadInt->tid = UINT64_MAX;
+
+    pThread = thread_create(NULL,                            /* Stack, use base */
+                            0,                               /* Stack size */
+                            rtThreadNativeMain,              /* Thread function */
+                            pThreadInt,                      /* Function data */
+                            0,                               /* Data size */
+                            &p0,                             /* Process 0 handle */
+                            TS_RUN,                          /* Ready to run */
+                            minclsyspri                      /* Priority */
+                            );
+    if (RT_LIKELY(pThread))
+    {
+        *pNativeThread = (RTNATIVETHREAD)pThread;
+        return VINF_SUCCESS;
+    }
+
+    return VERR_OUT_OF_RESOURCES;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c
new file mode 100644
index 00000000..fff12e96
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/threadctxhooks-r0drv-solaris.c
@@ -0,0 +1,349 @@
+/* $Id: threadctxhooks-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Thread Context Switching Hook, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2013-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+
+#include <iprt/mem.h>
+#include <iprt/assert.h>
+#include <iprt/thread.h>
+#include <iprt/errcore.h>
+#include <iprt/asm.h>
+#include <iprt/log.h>
+#include "internal/thread.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal hook object for Solaris.
+ *
+ * Lifetime note: created with two references (owner thread + caller); freed
+ * either by removectx() via rtThreadCtxHookSolFree or directly in
+ * RTThreadCtxHookDestroy, whichever drops the last reference.
+ */
+typedef struct RTTHREADCTXHOOKINT
+{
+    /** Magic value (RTTHREADCTXHOOKINT_MAGIC). */
+    uint32_t volatile           u32Magic;
+    /** The thread handle (owner) for which the context-hooks are registered. */
+    RTNATIVETHREAD              hOwner;
+    /** Pointer to the registered callback function. */
+    PFNRTTHREADCTXHOOK          pfnCallback;
+    /** User argument passed to the callback function. */
+    void                       *pvUser;
+    /** Whether the hook is enabled or not. */
+    bool volatile               fEnabled;
+    /** Number of references to this object. */
+    uint32_t volatile           cRefs;
+} RTTHREADCTXHOOKINT;
+typedef RTTHREADCTXHOOKINT *PRTTHREADCTXHOOKINT;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Validates a hook handle (pointer, magic, live reference count) and
+ *  returns @a rc from the calling function if any check fails. */
+#define RTTHREADCTX_VALID_RETURN_RC(pThis, rc) \
+    do { \
+        AssertPtrReturn((pThis), (rc)); \
+        AssertReturn((pThis)->u32Magic == RTTHREADCTXHOOKINT_MAGIC, (rc)); \
+        AssertReturn((pThis)->cRefs > 0, (rc)); \
+    } while (0)
+
+
+/**
+ * Hook function for the thread-save event (thread being scheduled out).
+ *
+ * Forwards to the user callback with RTTHREADCTXEVENT_OUT, but only while the
+ * hook is enabled; installctx registration itself is permanent (see
+ * RTThreadCtxHookCreate).
+ *
+ * @param   pvThreadCtxInt  Opaque pointer to the internal hook object.
+ *
+ * @remarks Called with preemption disabled!
+ */
+static void rtThreadCtxHookSolOut(void *pvThreadCtxInt)
+{
+    PRTTHREADCTXHOOKINT pThis = (PRTTHREADCTXHOOKINT)pvThreadCtxInt;
+    AssertPtr(pThis);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(pThis->cRefs > 0);
+
+    if (pThis->fEnabled)
+    {
+        Assert(pThis->pfnCallback);
+        pThis->pfnCallback(RTTHREADCTXEVENT_OUT, pThis->pvUser);
+    }
+}
+
+
+/**
+ * Hook function for the thread-restore event (thread being scheduled in).
+ *
+ * Forwards to the user callback with RTTHREADCTXEVENT_IN while the hook is
+ * enabled; mirror image of rtThreadCtxHookSolOut.
+ *
+ * @param   pvThreadCtxInt  Opaque pointer to the internal hook object.
+ *
+ * @remarks Called with preemption disabled!
+ */
+static void rtThreadCtxHookSolIn(void *pvThreadCtxInt)
+{
+    PRTTHREADCTXHOOKINT pThis = (PRTTHREADCTXHOOKINT)pvThreadCtxInt;
+    AssertPtr(pThis);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(pThis->cRefs > 0);
+
+    if (pThis->fEnabled)
+    {
+        Assert(pThis->pfnCallback);
+        pThis->pfnCallback(RTTHREADCTXEVENT_IN, pThis->pvUser);
+    }
+}
+
+
+/**
+ * Hook function for the thread-free event.
+ *
+ * This is used for making sure the hook object is safely released - see
+ * RTThreadCtxHookRelease for details.  Invoked by Solaris when the owner
+ * thread exits, or synchronously via removectx() from RTThreadCtxHookDestroy.
+ *
+ * @param   pvThreadCtxInt  Opaque pointer to the internal hook object.
+ * @param   fIsExec         Whether this event is triggered due to exec().
+ */
+static void rtThreadCtxHookSolFree(void *pvThreadCtxInt, int fIsExec)
+{
+    PRTTHREADCTXHOOKINT pThis = (PRTTHREADCTXHOOKINT)pvThreadCtxInt;
+    AssertPtrReturnVoid(pThis);
+    AssertMsgReturnVoid(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis));
+
+    /* Drop this callback's reference; the last reference out frees the object. */
+    uint32_t cRefs = ASMAtomicReadU32(&pThis->cRefs);
+    if (cRefs > 0)
+    {
+        cRefs = ASMAtomicDecU32(&pThis->cRefs);
+        if (!cRefs)
+        {
+            Assert(!pThis->fEnabled);
+            /* Invalidate the magic before freeing to catch use-after-free. */
+            ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
+            RTMemFree(pThis);
+        }
+    }
+    else
+    {
+        /* Should never happen. */
+        AssertMsgFailed(("rtThreadCtxHookSolFree with cRefs=0 pThis=%p\n", pThis));
+    }
+}
+
+
+RTDECL(int) RTThreadCtxHookCreate(PRTTHREADCTXHOOK phCtxHook, uint32_t fFlags, PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
+{
+    /*
+     * Validate input.
+     */
+    PRTTHREADCTXHOOKINT pThis;
+    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
+    AssertReturn(fFlags == 0, VERR_INVALID_FLAGS);
+
+    /*
+     * Allocate and initialize a new hook.  The hook starts out disabled;
+     * RTThreadCtxHookEnable flips fEnabled later.
+     */
+    pThis = (PRTTHREADCTXHOOKINT)RTMemAllocZ(sizeof(*pThis));
+    if (RT_UNLIKELY(!pThis))
+        return VERR_NO_MEMORY;
+    pThis->u32Magic     = RTTHREADCTXHOOKINT_MAGIC;
+    pThis->hOwner       = RTThreadNativeSelf();
+    pThis->pfnCallback  = pfnCallback;
+    pThis->pvUser       = pvUser;
+    pThis->fEnabled     = false;
+    pThis->cRefs        = 2;        /* One reference for the thread, one for the caller. */
+
+    /*
+     * installctx() allocates memory and thus cannot be used in RTThreadCtxHookRegister() which can be used
+     * with preemption disabled. We allocate the context-hooks here and use 'fEnabled' to determine if we can
+     * invoke the consumer's hook or not.
+     *
+     * g_frtSolOldThreadCtx selects between the pre- and post-"exit argument"
+     * installctx() signatures (differs between Solaris releases).
+     */
+    if (g_frtSolOldThreadCtx)
+    {
+        g_rtSolThreadCtx.Install.pfnSol_installctx_old(curthread,
+                                                       pThis,
+                                                       rtThreadCtxHookSolOut,  /* save */
+                                                       rtThreadCtxHookSolIn,   /* restore */
+                                                       NULL,                   /* fork */
+                                                       NULL,                   /* lwp_create */
+                                                       rtThreadCtxHookSolFree);
+    }
+    else
+    {
+        g_rtSolThreadCtx.Install.pfnSol_installctx(curthread,
+                                                   pThis,
+                                                   rtThreadCtxHookSolOut,  /* save */
+                                                   rtThreadCtxHookSolIn,   /* restore */
+                                                   NULL,                   /* fork */
+                                                   NULL,                   /* lwp_create */
+                                                   NULL,                   /* exit */
+                                                   rtThreadCtxHookSolFree);
+    }
+
+    *phCtxHook = pThis;
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
+{
+    /*
+     * Validate input, ignoring NIL.
+     */
+    PRTTHREADCTXHOOKINT pThis = hCtxHook;
+    if (pThis == NIL_RTTHREADCTXHOOK)
+        return VINF_SUCCESS;
+    RTTHREADCTX_VALID_RETURN_RC(hCtxHook, VERR_INVALID_HANDLE);
+    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!pThis->fEnabled || pThis->hOwner == RTThreadNativeSelf());
+
+    /*
+     * Make sure it's disabled.
+     */
+    ASMAtomicWriteBool(&pThis->fEnabled, false);
+
+    /*
+     * Decrement.
+     */
+    uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);
+    if (   cRefs == 1
+        && pThis->hOwner == RTThreadNativeSelf())
+    {
+        /*
+         * removectx() will invoke rtThreadCtxHookSolFree() and there is no way to bypass it and still use
+         * rtThreadCtxHookSolFree() at the same time.  Hence the convoluted reference counting.
+         *
+         * When this function is called from the owner thread and is the last reference, we call removectx() which
+         * will invoke rtThreadCtxHookSolFree() with cRefs = 1 and that will then free the hook object.
+         *
+         * When the function is called from a different thread, we simply decrement the reference. Whenever the
+         * ring-0 thread dies, Solaris will call rtThreadCtxHookSolFree() which will free the hook object.
+         */
+        int rc;
+        if (g_frtSolOldThreadCtx)
+        {
+            rc = g_rtSolThreadCtx.Remove.pfnSol_removectx_old(curthread,
+                                                              pThis,
+                                                              rtThreadCtxHookSolOut,  /* save */
+                                                              rtThreadCtxHookSolIn,   /* restore */
+                                                              NULL,                   /* fork */
+                                                              NULL,                   /* lwp_create */
+                                                              rtThreadCtxHookSolFree);
+        }
+        else
+        {
+            rc = g_rtSolThreadCtx.Remove.pfnSol_removectx(curthread,
+                                                          pThis,
+                                                          rtThreadCtxHookSolOut,  /* save */
+                                                          rtThreadCtxHookSolIn,   /* restore */
+                                                          NULL,                   /* fork */
+                                                          NULL,                   /* lwp_create */
+                                                          NULL,                   /* exit */
+                                                          rtThreadCtxHookSolFree);
+        }
+        /* removectx() returns non-zero when the context op was found and removed. */
+        AssertMsg(rc, ("removectx() failed. rc=%d\n", rc));
+        NOREF(rc);
+
+#if 0 /*def RT_STRICT - access after free */
+        cRefs = ASMAtomicReadU32(&pThis->cRefs);
+        Assert(!cRefs);
+#endif
+        cRefs = 0;
+    }
+    else if (!cRefs)
+    {
+        /*
+         * The ring-0 thread for this hook object has already died. Free up the object as we have no more references.
+         */
+        Assert(pThis->hOwner != RTThreadNativeSelf());
+        ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
+        RTMemFree(pThis);
+    }
+
+    /* NOTE(review): this returns the remaining reference count (0 on full
+       destruction) rather than a VINF_/VERR_ status - confirm that callers
+       expect this convention. */
+    return cRefs;
+}
+
+
+RTDECL(int) RTThreadCtxHookEnable(RTTHREADCTXHOOK hCtxHook)
+{
+    /*
+     * Validate input.  Must be called on the owner thread with the hook
+     * currently disabled.
+     */
+    PRTTHREADCTXHOOKINT pThis = hCtxHook;
+    AssertPtr(pThis);
+    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+                    VERR_INVALID_HANDLE);
+    Assert(pThis->hOwner == RTThreadNativeSelf());
+    Assert(!pThis->fEnabled);
+
+    /*
+     * Mark it as enabled; the installctx registration from creation time now
+     * starts forwarding save/restore events to the user callback.
+     */
+    pThis->fEnabled = true;
+
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTThreadCtxHookDisable(RTTHREADCTXHOOK hCtxHook)
+{
+    /*
+     * Validate input.  NIL is tolerated and treated as a no-op.
+     */
+    PRTTHREADCTXHOOKINT pThis = hCtxHook;
+    if (pThis != NIL_RTTHREADCTXHOOK)
+    {
+        AssertPtr(pThis);
+        AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+                        VERR_INVALID_HANDLE);
+        Assert(pThis->hOwner == RTThreadNativeSelf());
+
+        /*
+         * Mark it as disabled; the callbacks stay installed but become no-ops.
+         */
+        pThis->fEnabled = false;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Queries whether the given context hook is currently enabled.
+ */
+RTDECL(bool) RTThreadCtxHookIsEnabled(RTTHREADCTXHOOK hCtxHook)
+{
+    PRTTHREADCTXHOOKINT pThis = hCtxHook;
+    bool                fEnabled = false;
+
+    /* NIL is a valid input and simply means "no hook, hence not enabled". */
+    if (pThis != NIL_RTTHREADCTXHOOK)
+    {
+        AssertPtr(pThis);
+        AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC,
+                        ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
+                        false);
+        fEnabled = pThis->fEnabled;
+    }
+    return fEnabled;
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c
new file mode 100644
index 00000000..305afee8
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/time-r0drv-solaris.c
@@ -0,0 +1,70 @@
+/* $Id: time-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Time, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define RTTIME_INCL_TIMESPEC
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/time.h>
+
+
+RTDECL(uint64_t) RTTimeNanoTS(void)
+{
+    /* gethrtime() is a monotonic high-resolution nanosecond counter. */
+    return (uint64_t)gethrtime();
+}
+
+
+RTDECL(uint64_t) RTTimeMilliTS(void)
+{
+    /* Millisecond granularity derived from the nanosecond timestamp. */
+    return RTTimeNanoTS() / RT_NS_1MS;
+}
+
+
+RTDECL(uint64_t) RTTimeSystemNanoTS(void)
+{
+    /* On Solaris the system clock and the IPRT clock are the same source. */
+    return RTTimeNanoTS();
+}
+
+
+RTDECL(uint64_t) RTTimeSystemMilliTS(void)
+{
+    /* Same source as RTTimeMilliTS on this platform. */
+    return RTTimeNanoTS() / RT_NS_1MS;
+}
+
+
+RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+{
+    timestruc_t TimeSpec;
+
+    /* tod_get() must be read under tod_lock per the Solaris TOD contract. */
+    mutex_enter(&tod_lock);
+    TimeSpec = tod_get();
+    mutex_exit(&tod_lock);
+    /* Convert seconds + nanoseconds into a single nanosecond spec. */
+    return RTTimeSpecSetNano(pTime, (uint64_t)TimeSpec.tv_sec * RT_NS_1SEC + TimeSpec.tv_nsec);
+}
+
diff --git a/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c
new file mode 100644
index 00000000..af6b3757
--- /dev/null
+++ b/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c
@@ -0,0 +1,650 @@
+/* $Id: timer-r0drv-solaris.c $ */
+/** @file
+ * IPRT - Timer, Ring-0 Driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "the-solaris-kernel.h"
+#include "internal/iprt.h"
+#include <iprt/timer.h>
+
+#include <iprt/asm.h>
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# include <iprt/asm-amd64-x86.h>
+#endif
+#include <iprt/assert.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/mp.h>
+#include <iprt/spinlock.h>
+#include <iprt/time.h>
+#include <iprt/thread.h>
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/**
+ * The internal representation of a Solaris timer handle.
+ *
+ * Backed by the Solaris cyclic subsystem; either a single-CPU cyclic or an
+ * omni (all-CPU) cyclic depending on fAllCpus.
+ */
+typedef struct RTTIMER
+{
+    /** Magic.
+     * This is RTTIMER_MAGIC, but changes to something else before the timer
+     * is destroyed to indicate clearly that thread should exit. */
+    uint32_t volatile       u32Magic;
+    /** Reference counter. */
+    uint32_t volatile       cRefs;
+    /** Flag indicating that the timer is suspended (hCyclicId should be
+     *  CYCLIC_NONE). */
+    bool volatile           fSuspended;
+    /** Flag indicating that the timer was suspended from the timer callback and
+     *  therefore the hCyclicId may still be valid. */
+    bool volatile           fSuspendedFromTimer;
+    /** Flag indicating that the timer interval was changed and that it requires
+     *  manual expiration time programming for each callout. */
+    bool volatile           fIntervalChanged;
+    /** Whether the timer must run on all CPUs or not. */
+    uint8_t                 fAllCpus;
+    /** Whether the timer must run on a specific CPU or not. */
+    uint8_t                 fSpecificCpu;
+    /** The CPU it must run on if fSpecificCpu is set. */
+    uint32_t                iCpu;
+    /** The nano second interval for repeating timers. */
+    uint64_t volatile       cNsInterval;
+    /** Cyclic timer Id.  This is CYCLIC_NONE if no active timer.
+     * @remarks Please keep in mind that cyclic may call us back before the
+     *          cyclic_add/cyclic_add_omni functions returns, so don't use this
+     *          unguarded with cyclic_reprogram. */
+    cyclic_id_t             hCyclicId;
+    /** The user callback. */
+    PFNRTTIMER              pfnTimer;
+    /** The argument for the user callback. */
+    void                   *pvUser;
+    /** Union with timer type specific data. */
+    union
+    {
+        /** Single timer (fAllCpus == false). */
+        struct
+        {
+            /** Timer ticks. */
+            uint64_t        u64Tick;
+            /** The next tick when fIntervalChanged is true, otherwise 0. */
+            uint64_t        nsNextTick;
+            /** The (interrupt) thread currently active in the callback. */
+            kthread_t * volatile pActiveThread;
+        } Single;
+
+        /** Omni timer (fAllCpus == true). */
+        struct
+        {
+            /** Absolute timestamp of when the timer should fire first when starting up. */
+            uint64_t        u64When;
+            /** Array of per CPU data (variable size). */
+            struct
+            {
+                /** Timer ticks (reinitialized when online'd). */
+                uint64_t    u64Tick;
+                /** The (interrupt) thread currently active in the callback. */
+                kthread_t * volatile pActiveThread;
+                /** The next tick when fIntervalChanged is true, otherwise 0. */
+                uint64_t    nsNextTick;
+            } aPerCpu[1];
+        } Omni;
+    } u;
+} RTTIMER;
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** Validates that the timer handle is non-NULL and carries the live magic;
+ *  returns VERR_INVALID_HANDLE from the calling function otherwise. */
+#define RTTIMER_ASSERT_VALID_RET(pTimer) \
+    do \
+    { \
+        AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
+        AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
+                        VERR_INVALID_HANDLE); \
+    } while (0)
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void rtTimerSolSingleCallbackWrapper(void *pvArg);
+static void rtTimerSolStopIt(PRTTIMER pTimer);
+
+
+/**
+ * Retains a reference to the timer.
+ *
+ * @returns New reference counter value.
+ * @param   pTimer  The timer.
+ */
+DECLINLINE(uint32_t) rtTimerSolRetain(PRTTIMER pTimer)
+{
+    return ASMAtomicIncU32(&pTimer->cRefs);
+}
+
+
+/**
+ * Destroys the timer when the reference counter has reached zero.
+ *
+ * NOTE(review): named rtTimeSol... while its siblings use rtTimerSol...;
+ * looks like a typo in the prefix, but renaming would touch the caller.
+ *
+ * @returns 0 (new references counter value).
+ * @param   pTimer  The timer.
+ */
+static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pTimer)
+{
+    Assert(pTimer->hCyclicId == CYCLIC_NONE);
+    /* Invalidate the magic before freeing to catch stale handles. */
+    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
+    RTMemFree(pTimer);
+    return 0;
+}
+
+
+/**
+ * Releases a reference to the timer, destroying it when the count reaches
+ * zero.
+ *
+ * @returns New reference counter value.
+ * @param   pTimer  The timer.
+ */
+DECLINLINE(uint32_t) rtTimerSolRelease(PRTTIMER pTimer)
+{
+    uint32_t cRefs = ASMAtomicDecU32(&pTimer->cRefs);
+    if (!cRefs)
+        return rtTimeSolReleaseCleanup(pTimer);
+    return cRefs;
+}
+
+
+/**
+ * Callback wrapper for single-CPU timers.
+ *
+ * @param   pvArg   Opaque pointer to the timer.
+ *
+ * @remarks This will be executed in interrupt context but only at the specified
+ *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
+ *          cyclic subsystem here, neither should pfnTimer().
+ */
+static void rtTimerSolSingleCallbackWrapper(void *pvArg)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvArg;
+    AssertPtrReturnVoid(pTimer);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!pTimer->fAllCpus);
+
+    /* Make sure one-shots do not fire another time. */
+    Assert(   !pTimer->fSuspended
+           || pTimer->cNsInterval != 0);
+
+    if (!pTimer->fSuspendedFromTimer)
+    {
+        /* Make sure we are firing on the right CPU. */
+        Assert(   !pTimer->fSpecificCpu
+               || pTimer->iCpu == RTMpCpuId());
+
+        /* For one-shot, we may allow the callback to restart them. */
+        if (pTimer->cNsInterval == 0)
+            pTimer->fSuspendedFromTimer = true;
+
+        /*
+         * Perform the callout.  pActiveThread lets rtTimerSolIsCallingFromTimerProc
+         * detect re-entry from the user callback.
+         */
+        pTimer->u.Single.pActiveThread = curthread;
+
+        uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
+
+        pTimer->u.Single.pActiveThread = NULL;
+
+        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
+        {
+            if (   !pTimer->fIntervalChanged
+                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
+                return;
+
+            /*
+             * The interval was changed, we need to set the expiration time
+             * ourselves before returning.  This comes at a slight cost,
+             * which is why we don't do it all the time.
+             */
+            if (pTimer->u.Single.nsNextTick)
+                pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
+            else
+                pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
+            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
+            return;
+        }
+
+        /*
+         * The timer has been suspended, set expiration time to infinity.
+         */
+    }
+    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
+        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
+}
+
+
+/**
+ * Callback wrapper for Omni-CPU timers.
+ *
+ * @param   pvArg   Opaque pointer to the timer.
+ *
+ * @remarks This will be executed in interrupt context but only at the specified
+ *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
+ *          cyclic subsystem here, neither should pfnTimer().
+ */
+static void rtTimerSolOmniCallbackWrapper(void *pvArg)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvArg;
+    AssertPtrReturnVoid(pTimer);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(pTimer->fAllCpus);
+
+    if (!pTimer->fSuspendedFromTimer)
+    {
+        /*
+         * Perform the callout using this CPU's per-CPU tick and active-thread
+         * slots (each CPU has its own cyclic component).
+         */
+        uint32_t const iCpu = CPU->cpu_id;
+
+        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
+        uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;
+
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
+
+        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;
+
+        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
+        {
+            if (   !pTimer->fIntervalChanged
+                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
+                return;
+
+            /*
+             * The interval was changed, we need to set the expiration time
+             * ourselves before returning.  This comes at a slight cost,
+             * which is why we don't do it all the time.
+             *
+             * Note! The cyclic_reprogram call only affects the omni cyclic
+             *       component for this CPU.
+             */
+            if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
+                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
+            else
+                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
+            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
+            return;
+        }
+
+        /*
+         * The timer has been suspended, set expiration time to infinity.
+         */
+    }
+    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
+        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
+}
+
+
+/**
+ * Omni-CPU cyclic online event.  This is called before the omni cyclic begins
+ * to fire on the specified CPU.
+ *
+ * @param   pvArg           Opaque pointer to the timer.
+ * @param   pCpu            Pointer to the CPU on which it will fire.
+ * @param   pCyclicHandler  Pointer to a cyclic handler to add to the CPU
+ *                          specified in @a pCpu.
+ * @param   pCyclicTime     Pointer to the cyclic time and interval object.
+ *
+ * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
+ *          block (sleep).
+ */
+static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvArg;
+    AssertPtrReturnVoid(pTimer);
+    AssertPtrReturnVoid(pCpu);
+    AssertPtrReturnVoid(pCyclicHandler);
+    AssertPtrReturnVoid(pCyclicTime);
+    uint32_t const iCpu = pCpu->cpu_id; /* Note! CPU is not necessarily the same as pCpu. */
+
+    /* Fresh per-CPU state for the (re-)onlined CPU. */
+    pTimer->u.Omni.aPerCpu[iCpu].u64Tick = 0;
+    pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = 0;
+
+    pCyclicHandler->cyh_func  = (cyc_func_t)rtTimerSolOmniCallbackWrapper;
+    pCyclicHandler->cyh_arg   = pTimer;
+    pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
+
+    /* If the requested first-fire time already passed (e.g. CPU onlined late),
+       fire half an interval from now instead of in the past. */
+    uint64_t u64Now = RTTimeSystemNanoTS();
+    if (pTimer->u.Omni.u64When < u64Now)
+        pCyclicTime->cyt_when = u64Now + pTimer->cNsInterval / 2;
+    else
+        pCyclicTime->cyt_when = pTimer->u.Omni.u64When;
+
+    pCyclicTime->cyt_interval = pTimer->cNsInterval;
+}
+
+
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+    RT_ASSERT_PREEMPTIBLE();
+    *ppTimer = NULL;
+
+    /*
+     * Validate flags.
+     */
+    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
+        return VERR_INVALID_PARAMETER;
+
+    /* A specific-CPU timer needs that CPU to at least be a possible one. */
+    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
+        return VERR_CPU_NOT_FOUND;
+
+    /* One-shot omni timers are not supported by the cyclic system. */
+    if (   (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
+        && u64NanoInterval == 0)
+        return VERR_NOT_SUPPORTED;
+
+    /*
+     * Allocate and initialize the timer handle.  The omni variant has a
+     * variable sized array of ticks counts, thus the size calculation.
+     */
+    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(  (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
+                                            ? RT_UOFFSETOF_DYN(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
+                                            : sizeof(RTTIMER));
+    if (!pTimer)
+        return VERR_NO_MEMORY;
+
+    /* Created suspended; RTTimerStart arms the cyclic. */
+    pTimer->u32Magic = RTTIMER_MAGIC;
+    pTimer->cRefs = 1;
+    pTimer->fSuspended = true;
+    pTimer->fSuspendedFromTimer = false;
+    pTimer->fIntervalChanged = false;
+    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
+    {
+        pTimer->fAllCpus = true;
+        pTimer->fSpecificCpu = false;
+        pTimer->iCpu = UINT32_MAX;
+    }
+    else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+    {
+        pTimer->fAllCpus = false;
+        pTimer->fSpecificCpu = true;
+        pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
+    }
+    else
+    {
+        pTimer->fAllCpus = false;
+        pTimer->fSpecificCpu = false;
+        pTimer->iCpu = UINT32_MAX;
+    }
+    pTimer->cNsInterval = u64NanoInterval;
+    pTimer->pfnTimer = pfnTimer;
+    pTimer->pvUser = pvUser;
+    pTimer->hCyclicId = CYCLIC_NONE;
+
+    *ppTimer = pTimer;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the calling thread is currently executing the timer procedure for
+ * the given timer.
+ *
+ * @returns true if it is, false if it isn't.
+ * @param   pTimer  The timer in question.
+ */
+DECLINLINE(bool) rtTimerSolIsCallingFromTimerProc(PRTTIMER pTimer)
+{
+    kthread_t *pCurThread = curthread;
+    AssertReturn(pCurThread, false);    /* serious paranoia */
+
+    /* Compare against the active-thread slot the callback wrappers maintain. */
+    if (!pTimer->fAllCpus)
+        return pTimer->u.Single.pActiveThread == pCurThread;
+    return pTimer->u.Omni.aPerCpu[CPU->cpu_id].pActiveThread == pCurThread;
+}
+
+
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+    if (pTimer == NULL)
+        return VINF_SUCCESS;
+    RTTIMER_ASSERT_VALID_RET(pTimer);
+    RT_ASSERT_INTS_ON();
+
+    /*
+     * It is not possible to destroy a timer from its callback function.
+     * Cyclic makes that impossible (or at least extremely risky).
+     */
+    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);
+
+    /*
+     * Invalidate the handle, make sure it's stopped and free the associated resources.
+     */
+    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
+
+    if (   !pTimer->fSuspended
+        || pTimer->hCyclicId != CYCLIC_NONE) /* 2nd check shouldn't happen */
+        rtTimerSolStopIt(pTimer);
+
+    /* Drop the creation reference; cleanup happens when the count reaches 0. */
+    rtTimerSolRelease(pTimer);
+    return VINF_SUCCESS;
+}
+
+
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It's not possible to restart a one-shot timer from its callback
     * function, at least not at the moment.
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /* cpu_lock serializes us against the cyclic subsystem (cyclic_add/bind
       require it to be held). */
    mutex_enter(&cpu_lock);

    /*
     * Make sure it's not active already.  If it was suspended from a timer
     * callback function, we need to do some cleanup work here before we can
     * restart the timer.
     */
    if (!pTimer->fSuspended)
    {
        if (!pTimer->fSuspendedFromTimer)
        {
            mutex_exit(&cpu_lock);
            return VERR_TIMER_ACTIVE;
        }
        /* Suspended from inside the callback: the cyclic is still installed
           (RTTimerStop deferred the removal), so remove it now before
           re-adding below. */
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }

    pTimer->fSuspended = false;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged = false;
    if (pTimer->fAllCpus)
    {
        /*
         * Setup omni (all CPU) timer.  The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->u.Omni.u64When = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);

        cyc_omni_handler_t HandlerOmni;
        HandlerOmni.cyo_online = rtTimerSolOmniCpuOnline;
        HandlerOmni.cyo_offline = NULL; /* no per-CPU cleanup needed on offline */
        HandlerOmni.cyo_arg = pTimer;

        pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
    }
    else
    {
        cyc_handler_t Handler;
        cyc_time_t FireTime;

        /*
         * Setup a single CPU timer.  If a specific CPU was requested, it
         * must be online or the timer cannot start.
         */
        if (   pTimer->fSpecificCpu
            && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
        {
            /* Roll back so the timer is left in a restartable state. */
            pTimer->fSuspended = true;

            mutex_exit(&cpu_lock);
            return VERR_CPU_OFFLINE;
        }

        Handler.cyh_func = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
        Handler.cyh_arg = pTimer;
        Handler.cyh_level = CY_LOCK_LEVEL;

        /*
         * Use a large interval (1 hour) so that we don't get a timer-callback between
         * cyclic_add() and cyclic_bind().  Program the correct interval once cyclic_bind() is done.
         * See @bugref{7691#c20}.
         */
        if (!pTimer->fSpecificCpu)
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
        else
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First + RT_NS_1HOUR;
        FireTime.cyt_interval = pTimer->cNsInterval != 0
                              ? pTimer->cNsInterval
                              : CY_INFINITY /* Special value, see cyclic_fire(). */;
        pTimer->u.Single.u64Tick = 0;
        pTimer->u.Single.nsNextTick = 0;

        pTimer->hCyclicId = cyclic_add(&Handler, &FireTime);
        if (pTimer->fSpecificCpu)
        {
            /* Bind to the requested CPU, then reprogram the real expiry now
               that the callback can no longer fire on the wrong CPU. */
            cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
            cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
        }
    }

    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
+
+
+/**
+ * Worker common for RTTimerStop and RTTimerDestroy.
+ *
+ * @param pTimer The timer to stop.
+ */
+static void rtTimerSolStopIt(PRTTIMER pTimer)
+{
+ mutex_enter(&cpu_lock);
+
+ pTimer->fSuspended = true;
+ if (pTimer->hCyclicId != CYCLIC_NONE)
+ {
+ cyclic_remove(pTimer->hCyclicId);
+ pTimer->hCyclicId = CYCLIC_NONE;
+ }
+ pTimer->fSuspendedFromTimer = false;
+
+ mutex_exit(&cpu_lock);
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+ RTTIMER_ASSERT_VALID_RET(pTimer);
+ RT_ASSERT_INTS_ON();
+
+ if (pTimer->fSuspended)
+ return VERR_TIMER_SUSPENDED;
+
+ /* Trying the cpu_lock stuff and calling cyclic_remove may deadlock
+ the system, so just mark the timer as suspened and deal with it in
+ the callback wrapper function above. */
+ if (rtTimerSolIsCallingFromTimerProc(pTimer))
+ pTimer->fSuspendedFromTimer = true;
+ else
+ rtTimerSolStopIt(pTimer);
+
+ return VINF_SUCCESS;
+}
+
+
RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    /*
     * Validate.
     */
    RTTIMER_ASSERT_VALID_RET(pTimer);
    AssertReturn(u64NanoInterval > 0, VERR_INVALID_PARAMETER);
    AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
    AssertReturn(pTimer->cNsInterval, VERR_INVALID_STATE); /* one-shot timers (interval 0) can't change interval */

    if (pTimer->fSuspended || pTimer->fSuspendedFromTimer)
        /* Not running: plain assignment suffices, RTTimerStart will pick it up. */
        pTimer->cNsInterval = u64NanoInterval;
    else
    {
        /* Running: publish the new interval atomically, then raise the
           change flag.  NOTE(review): the write order (interval before flag)
           looks deliberate - presumably the callback wrapper re-reads
           cNsInterval once it observes fIntervalChanged; confirm before
           reordering. */
        ASMAtomicWriteU64(&pTimer->cNsInterval, u64NanoInterval);
        ASMAtomicWriteBool(&pTimer->fIntervalChanged, true);

        /* For a single-CPU timer changed from within its own callback with no
           next tick scheduled yet, seed nsNextTick with the current time so
           the new interval takes effect relative to now. */
        if (   !pTimer->fAllCpus
            && !pTimer->u.Single.nsNextTick
            && pTimer->hCyclicId != CYCLIC_NONE
            && rtTimerSolIsCallingFromTimerProc(pTimer))
            pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS();
    }

    return VINF_SUCCESS;
}
+
+
RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
{
    /* Report the length of a Solaris clock tick (nsec_per_tick) as the
       system timer granularity, in nanoseconds. */
    return nsec_per_tick;
}
+
+
RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    /* Changing the system timer granularity is not supported on this
       platform; callers must cope with VERR_NOT_SUPPORTED. */
    return VERR_NOT_SUPPORTED;
}
+
+
RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    /* Nothing to release since RTTimerRequestSystemGranularity never
       grants anything on this platform. */
    return VERR_NOT_SUPPORTED;
}
+
+
RTDECL(bool) RTTimerCanDoHighResolution(void)
{
    /* High-resolution timers are always available here: the cyclic backend
       used by RTTimerStart is programmed with nanosecond expiry times. */
    return true;
}