author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
commit    f215e02bf85f68d3a6106c2a1f4f7f063f819064
tree      6bb5b92c046312c4e95ac2620b10ddf482d3fa8b /src/VBox/GuestHost/HGSMI
parent    Initial commit.
Adding upstream version 7.0.14-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/GuestHost/HGSMI')
-rw-r--r--  src/VBox/GuestHost/HGSMI/.scm-settings       31
-rw-r--r--  src/VBox/GuestHost/HGSMI/HGSMICommon.cpp    450
-rw-r--r--  src/VBox/GuestHost/HGSMI/HGSMIMemAlloc.cpp  684
-rw-r--r--  src/VBox/GuestHost/HGSMI/Makefile.kmk        59
4 files changed, 1224 insertions, 0 deletions
diff --git a/src/VBox/GuestHost/HGSMI/.scm-settings b/src/VBox/GuestHost/HGSMI/.scm-settings
new file mode 100644
index 00000000..e8698ae0
--- /dev/null
+++ b/src/VBox/GuestHost/HGSMI/.scm-settings
@@ -0,0 +1,31 @@
+# $Id: .scm-settings $
+## @file
+# Source code massager settings for HGSMI.
+#
+
+#
+# Copyright (C) 2010-2023 Oracle and/or its affiliates.
+#
+# This file is part of VirtualBox base platform packages, as
+# available from https://www.virtualbox.org.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation, in version 3 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses>.
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+
+# @bugref{8524} r114561: Use the MIT license for files upstreamed to the Linux
+# kernel, to encourage other kernels to pick them up.
+HGSMICommon.cpp: --license-mit
+
diff --git a/src/VBox/GuestHost/HGSMI/HGSMICommon.cpp b/src/VBox/GuestHost/HGSMI/HGSMICommon.cpp
new file mode 100644
index 00000000..31b8eba5
--- /dev/null
+++ b/src/VBox/GuestHost/HGSMI/HGSMICommon.cpp
@@ -0,0 +1,450 @@
+/* $Id: HGSMICommon.cpp $ */
+/** @file
+ * VBox Host Guest Shared Memory Interface (HGSMI) - Functions common to both host and guest.
+ */
+
+/*
+ * Copyright (C) 2006-2023 Oracle and/or its affiliates.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define LOG_DISABLED /* Maybe we can enable it all the time now? */
+/** @note All logging statements are commented out to avoid pulling the logging
+ * sub-system into places like the Linux kernel driver. Perhaps the best
+ * thing would be to return enough information for callers to log what
+ * is needed. */
+#define LOG_GROUP LOG_GROUP_HGSMI
+
+#include <VBoxVideoIPRT.h>
+
+#include <HGSMI.h>
+// #include <VBox/log.h>
+
+
+/* Channel flags. */
+#define HGSMI_CH_F_REGISTERED 0x01
+
+/* Assertions for situations which can legitimately happen and normally must be
+ * handled properly, but which should be investigated during development:
+ * a misbehaving guest, etc.
+ */
+#ifdef HGSMI_STRICT
+#define HGSMI_STRICT_ASSERT_FAILED() AssertFailed()
+#define HGSMI_STRICT_ASSERT(expr) Assert(expr)
+#else
+#define HGSMI_STRICT_ASSERT_FAILED() do {} while (0)
+#define HGSMI_STRICT_ASSERT(expr) do {} while (0)
+#endif /* !HGSMI_STRICT */
+
+/*
+ * We do not want assertions in Linux kernel code, to reduce symbol dependencies.
+ */
+#if defined(IN_RING0) && defined(RT_OS_LINUX)
+# define HGSMI_ASSERT_PTR_RETURN(a, b) if (!(a)) return (b)
+#else
+# define HGSMI_ASSERT_PTR_RETURN(a, b) if (!(a)) return (b)
+#endif /* !IN_RING0 && RT_OS_LINUX */
+
+/* One-at-a-Time Hash from
+ * http://www.burtleburtle.net/bob/hash/doobs.html
+ *
+ * ub4 one_at_a_time(char *key, ub4 len)
+ * {
+ * ub4 hash, i;
+ * for (hash=0, i=0; i<len; ++i)
+ * {
+ * hash += key[i];
+ * hash += (hash << 10);
+ * hash ^= (hash >> 6);
+ * }
+ * hash += (hash << 3);
+ * hash ^= (hash >> 11);
+ * hash += (hash << 15);
+ * return hash;
+ * }
+ */
+
+static uint32_t hgsmiHashBegin(void)
+{
+ return 0;
+}
+
+static uint32_t hgsmiHashProcess(uint32_t hash, const void RT_UNTRUSTED_VOLATILE_HSTGST *pvData, size_t cbData)
+{
+ const uint8_t *pu8Data = (const uint8_t *)pvData;
+
+ while (cbData--)
+ {
+ hash += *pu8Data++;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ return hash;
+}
+
+static uint32_t hgsmiHashEnd(uint32_t hash)
+{
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
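+/** Compute the checksum of a buffer: a one-at-a-time hash over the buffer
+ * offset, the header, and the tail up to (but excluding) the tail's own
+ * u32Checksum field.
+ */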
+uint32_t HGSMIChecksum(HGSMIOFFSET offBuffer, const HGSMIBUFFERHEADER RT_UNTRUSTED_VOLATILE_HSTGST *pHeader,
+ const HGSMIBUFFERTAIL RT_UNTRUSTED_VOLATILE_HSTGST *pTail)
+{
+ uint32_t u32Checksum = hgsmiHashBegin();
+
+ u32Checksum = hgsmiHashProcess(u32Checksum, &offBuffer, sizeof(offBuffer));
+ u32Checksum = hgsmiHashProcess(u32Checksum, pHeader, sizeof(HGSMIBUFFERHEADER));
+ u32Checksum = hgsmiHashProcess(u32Checksum, pTail, RT_UOFFSETOF(HGSMIBUFFERTAIL, u32Checksum));
+
+ return hgsmiHashEnd(u32Checksum);
+}
+
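+/** Describe the memory range [pvBase, pvBase + cbArea) as an HGSMI area
+ * starting at offset offBase. Fails if the area is too small for a minimal
+ * buffer, wraps the address space, or does not fit into 32-bit offsets.
+ */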
+int HGSMIAreaInitialize(HGSMIAREA *pArea, void *pvBase, HGSMISIZE cbArea, HGSMIOFFSET offBase)
+{
+ uint8_t *pu8Base = (uint8_t *)pvBase;
+
+ if ( !pArea /* Check that the area: */
+ || cbArea < HGSMIBufferMinimumSize() /* large enough; */
+ || pu8Base + cbArea < pu8Base /* no address space wrap; */
+ || offBase > UINT32_C(0xFFFFFFFF) - cbArea /* area within the 32 bit space: offBase + cbMem <= 0xFFFFFFFF. */
+ )
+ {
+ return VERR_INVALID_PARAMETER;
+ }
+
+ pArea->pu8Base = pu8Base;
+ pArea->offBase = offBase;
+ pArea->offLast = cbArea - HGSMIBufferMinimumSize() + offBase;
+ pArea->cbArea = cbArea;
+
+ return VINF_SUCCESS;
+}
+
+void HGSMIAreaClear(HGSMIAREA *pArea)
+{
+ if (pArea)
+ {
+ RT_ZERO(*pArea);
+ }
+}
+
+/* Initialize the memory buffer including its checksum.
+ * No changes to the header and the tail are allowed after that.
+ */
+HGSMIOFFSET HGSMIBufferInitializeSingle(const HGSMIAREA *pArea,
+ HGSMIBUFFERHEADER *pHeader,
+ HGSMISIZE cbBuffer,
+ uint8_t u8Channel,
+ uint16_t u16ChannelInfo)
+{
+ if ( !pArea
+ || !pHeader
+ || cbBuffer < HGSMIBufferMinimumSize())
+ {
+ return HGSMIOFFSET_VOID;
+ }
+
+ /* Buffer must be within the area:
+ * * the header data size does not exceed the maximum data size;
+ * * the buffer address is not below the area base address;
+ * * the buffer address is not above the maximum allowed for the given data size.
+ */
+ HGSMISIZE cbMaximumDataSize = pArea->offLast - pArea->offBase;
+ uint32_t u32DataSize = cbBuffer - HGSMIBufferMinimumSize();
+
+ if ( u32DataSize > cbMaximumDataSize
+ || (uint8_t *)pHeader < pArea->pu8Base
+ || (uint8_t *)pHeader > pArea->pu8Base + cbMaximumDataSize - u32DataSize)
+ {
+ return HGSMIOFFSET_VOID;
+ }
+
+ HGSMIOFFSET offBuffer = HGSMIPointerToOffset(pArea, pHeader);
+
+ pHeader->u8Flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
+ pHeader->u32DataSize = u32DataSize;
+ pHeader->u8Channel = u8Channel;
+ pHeader->u16ChannelInfo = u16ChannelInfo;
+ RT_ZERO(pHeader->u.au8Union);
+
+ HGSMIBUFFERTAIL RT_UNTRUSTED_VOLATILE_HSTGST *pTail = HGSMIBufferTailFromPtr(pHeader, u32DataSize);
+ pTail->u32Reserved = 0;
+ pTail->u32Checksum = HGSMIChecksum(offBuffer, pHeader, pTail);
+
+ return offBuffer;
+}
+
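+/** Set up a heap on top of the given memory range: describes the range as an
+ * HGSMI area and initializes the block allocator that manages it.
+ */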
+int HGSMIHeapSetup(HGSMIHEAP *pHeap, void *pvBase, HGSMISIZE cbArea, HGSMIOFFSET offBase, const HGSMIENV *pEnv)
+{
+ HGSMI_ASSERT_PTR_RETURN(pHeap, VERR_INVALID_PARAMETER);
+ HGSMI_ASSERT_PTR_RETURN(pvBase, VERR_INVALID_PARAMETER);
+
+ int rc = HGSMIAreaInitialize(&pHeap->area, pvBase, cbArea, offBase);
+ if (RT_SUCCESS(rc))
+ {
+ rc = HGSMIMAInit(&pHeap->ma, &pHeap->area, NULL, 0, 0, pEnv);
+ if (RT_FAILURE(rc))
+ {
+ HGSMIAreaClear(&pHeap->area);
+ }
+ }
+
+ return rc;
+}
+
+void HGSMIHeapDestroy(HGSMIHEAP *pHeap)
+{
+ if (pHeap)
+ {
+ HGSMIMAUninit(&pHeap->ma);
+ RT_ZERO(*pHeap);
+ }
+}
+
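+/** Allocate a buffer with cbData payload bytes plus the HGSMI header and tail,
+ * initialize the header and the checksum, and return a pointer to the payload
+ * data (or NULL on failure).
+ */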
+void RT_UNTRUSTED_VOLATILE_HOST *HGSMIHeapAlloc(HGSMIHEAP *pHeap,
+ HGSMISIZE cbData,
+ uint8_t u8Channel,
+ uint16_t u16ChannelInfo)
+{
+ HGSMISIZE cbAlloc = HGSMIBufferRequiredSize(cbData);
+ HGSMIBUFFERHEADER *pHeader = (HGSMIBUFFERHEADER *)HGSMIHeapBufferAlloc(pHeap, cbAlloc);
+ if (pHeader)
+ {
+ HGSMIOFFSET offBuffer = HGSMIBufferInitializeSingle(HGSMIHeapArea(pHeap), pHeader,
+ cbAlloc, u8Channel, u16ChannelInfo);
+ if (offBuffer == HGSMIOFFSET_VOID)
+ {
+ HGSMIHeapBufferFree(pHeap, pHeader);
+ pHeader = NULL;
+ }
+ }
+
+ return pHeader? HGSMIBufferDataFromPtr(pHeader): NULL;
+}
+
+void HGSMIHeapFree(HGSMIHEAP *pHeap, void RT_UNTRUSTED_VOLATILE_GUEST *pvData)
+{
+ if (pvData)
+ {
+ HGSMIBUFFERHEADER RT_UNTRUSTED_VOLATILE_HOST *pHeader = HGSMIBufferHeaderFromData(pvData);
+ HGSMIHeapBufferFree(pHeap, pHeader);
+ }
+}
+
+void RT_UNTRUSTED_VOLATILE_HSTGST *HGSMIHeapBufferAlloc(HGSMIHEAP *pHeap, HGSMISIZE cbBuffer)
+{
+ return HGSMIMAAlloc(&pHeap->ma, cbBuffer);
+}
+
+void HGSMIHeapBufferFree(HGSMIHEAP *pHeap, void RT_UNTRUSTED_VOLATILE_GUEST *pvBuf)
+{
+ HGSMIMAFree(&pHeap->ma, pvBuf);
+}
+
+typedef struct HGSMIBUFFERCONTEXT
+{
+ /** The original buffer header. */
+ const HGSMIBUFFERHEADER RT_UNTRUSTED_VOLATILE_HSTGST *pHeader;
+ /** Payload data in the buffer. */
+ void RT_UNTRUSTED_VOLATILE_HSTGST *pvData;
+ /** Size of data */
+ uint32_t cbData;
+} HGSMIBUFFERCONTEXT;
+
+/** Verify that the given offBuffer points to a valid buffer, which is within the area.
+ *
+ * @returns VBox status code and the buffer information in pBufferContext.
+ * @param pArea Area which is supposed to contain the buffer.
+ * @param offBuffer The buffer location in the area.
+ * @param pBufferContext Where to write information about the buffer.
+ */
+static int hgsmiVerifyBuffer(const HGSMIAREA *pArea, HGSMIOFFSET offBuffer, HGSMIBUFFERCONTEXT *pBufferContext)
+{
+ // LogFlowFunc(("buffer 0x%x, area %p %x [0x%x;0x%x]\n",
+ // offBuffer, pArea->pu8Base, pArea->cbArea, pArea->offBase, pArea->offLast));
+
+ int rc = VINF_SUCCESS;
+
+ if ( offBuffer < pArea->offBase
+ || offBuffer > pArea->offLast)
+ {
+ // LogFunc(("offset 0x%x is outside the area [0x%x;0x%x]!!!\n",
+ // offBuffer, pArea->offBase, pArea->offLast));
+ rc = VERR_INVALID_PARAMETER;
+ HGSMI_STRICT_ASSERT_FAILED();
+ }
+ else
+ {
+ void RT_UNTRUSTED_VOLATILE_HSTGST *pvBuffer = HGSMIOffsetToPointer(pArea, offBuffer);
+ HGSMIBUFFERHEADER header;
+ memcpy(&header, (void *)HGSMIBufferHeaderFromPtr(pvBuffer), sizeof(header));
+ ASMCompilerBarrier();
+
+ /* Quick check of the data size, it should be less than the maximum
+ * data size for the buffer at this offset.
+ */
+ // LogFlowFunc(("datasize check: header.u32DataSize = 0x%x pArea->offLast - offBuffer = 0x%x\n",
+ // header.u32DataSize, pArea->offLast - offBuffer));
+
+ if (header.u32DataSize <= pArea->offLast - offBuffer)
+ {
+ HGSMIBUFFERTAIL tail;
+ memcpy(&tail, (void *)HGSMIBufferTailFromPtr(pvBuffer, header.u32DataSize), sizeof(tail));
+ ASMCompilerBarrier();
+
+ /* At least both header and tail structures are in the area. Check the checksum. */
+ uint32_t u32Checksum = HGSMIChecksum(offBuffer, &header, &tail);
+ // LogFlowFunc(("checksum check: u32Checksum = 0x%x pTail->u32Checksum = 0x%x\n",
+ // u32Checksum, tail.u32Checksum));
+ if (u32Checksum == tail.u32Checksum)
+ {
+ /* Success. */
+ pBufferContext->pHeader = HGSMIBufferHeaderFromPtr(pvBuffer);
+ pBufferContext->pvData = HGSMIBufferDataFromPtr(pvBuffer);
+ pBufferContext->cbData = header.u32DataSize;
+ }
+ else
+ {
+ // LogFunc(("invalid checksum 0x%x, expected 0x%x!!!\n",
+ // u32Checksum, tail.u32Checksum));
+ rc = VERR_INVALID_STATE;
+ HGSMI_STRICT_ASSERT_FAILED();
+ }
+ }
+ else
+ {
+ // LogFunc(("invalid data size 0x%x, maximum is 0x%x!!!\n",
+ // header.u32DataSize, pArea->offLast - offBuffer));
+ rc = VERR_TOO_MUCH_DATA;
+ HGSMI_STRICT_ASSERT_FAILED();
+ }
+ }
+
+ return rc;
+}
+
+/** Helper to convert HGSMI channel index to the channel structure pointer.
+ *
+ * @returns Pointer to the channel data.
+ * @param pChannelInfo The channel pool.
+ * @param u8Channel The channel index.
+ */
+HGSMICHANNEL *HGSMIChannelFindById(HGSMICHANNELINFO *pChannelInfo,
+ uint8_t u8Channel)
+{
+ AssertCompile(RT_ELEMENTS(pChannelInfo->Channels) >= 0x100);
+ HGSMICHANNEL *pChannel = &pChannelInfo->Channels[u8Channel];
+
+ if (pChannel->u8Flags & HGSMI_CH_F_REGISTERED)
+ {
+ return pChannel;
+ }
+
+ return NULL;
+}
+
+/** Process a guest buffer.
+ *
+ * @returns VBox status code.
+ * @param pArea Area which is supposed to contain the buffer.
+ * @param pChannelInfo The channel pool.
+ * @param offBuffer The buffer location in the area.
+ */
+int HGSMIBufferProcess(const HGSMIAREA *pArea,
+ HGSMICHANNELINFO *pChannelInfo,
+ HGSMIOFFSET offBuffer)
+{
+ // LogFlowFunc(("pArea %p, offBuffer 0x%x\n", pArea, offBuffer));
+
+ HGSMI_ASSERT_PTR_RETURN(pArea, VERR_INVALID_PARAMETER);
+ HGSMI_ASSERT_PTR_RETURN(pChannelInfo, VERR_INVALID_PARAMETER);
+
+ /* Guest has prepared a command description at 'offBuffer'. */
+ HGSMIBUFFERCONTEXT bufferContext = { NULL, NULL, 0 }; /* Makes old GCC happier. */
+ int rc = hgsmiVerifyBuffer(pArea, offBuffer, &bufferContext);
+ if (RT_SUCCESS(rc))
+ {
+ /* Pass the command to the appropriate handler registered with this instance. */
+ const HGSMICHANNEL *pChannel = HGSMIChannelFindById(pChannelInfo, bufferContext.pHeader->u8Channel);
+ if (pChannel)
+ {
+ const HGSMICHANNELHANDLER *pHandler = &pChannel->handler;
+ if (pHandler->pfnHandler)
+ {
+ pHandler->pfnHandler(pHandler->pvHandler, bufferContext.pHeader->u16ChannelInfo,
+ bufferContext.pvData, bufferContext.cbData);
+ }
+ HGSMI_STRICT_ASSERT(RT_SUCCESS(hgsmiVerifyBuffer(pArea, offBuffer, &bufferContext)));
+ }
+ else
+ {
+ rc = VERR_INVALID_FUNCTION;
+ HGSMI_STRICT_ASSERT_FAILED();
+ }
+ }
+
+ return rc;
+}
+
+/** Register a new HGSMI channel by index.
+ *
+ * @returns VBox status code.
+ * @param pChannelInfo The channel pool managed by the caller.
+ * @param u8Channel Index of the channel.
+ * @param pszName Name of the channel (optional, allocated by the caller).
+ * @param pfnChannelHandler The channel callback.
+ * @param pvChannelHandler The callback context pointer passed to the handler.
+ */
+int HGSMIChannelRegister(HGSMICHANNELINFO *pChannelInfo,
+ uint8_t u8Channel,
+ const char *pszName,
+ PFNHGSMICHANNELHANDLER pfnChannelHandler,
+ void *pvChannelHandler)
+{
+ /* Check whether the channel is already registered. */
+ HGSMICHANNEL *pChannel = HGSMIChannelFindById(pChannelInfo, u8Channel);
+ if (pChannel)
+ {
+ HGSMI_STRICT_ASSERT_FAILED();
+ return VERR_ALREADY_EXISTS;
+ }
+
+ /* Channel is not yet registered. */
+ pChannel = &pChannelInfo->Channels[u8Channel];
+
+ pChannel->u8Flags = HGSMI_CH_F_REGISTERED;
+ pChannel->u8Channel = u8Channel;
+
+ pChannel->handler.pfnHandler = pfnChannelHandler;
+ pChannel->handler.pvHandler = pvChannelHandler;
+
+ pChannel->pszName = pszName;
+
+ return VINF_SUCCESS;
+}
diff --git a/src/VBox/GuestHost/HGSMI/HGSMIMemAlloc.cpp b/src/VBox/GuestHost/HGSMI/HGSMIMemAlloc.cpp
new file mode 100644
index 00000000..d9210f70
--- /dev/null
+++ b/src/VBox/GuestHost/HGSMI/HGSMIMemAlloc.cpp
@@ -0,0 +1,684 @@
+/* $Id: HGSMIMemAlloc.cpp $ */
+/** @file
+ * VBox Host Guest Shared Memory Interface (HGSMI) - Memory allocator.
+ */
+
+/*
+ * Copyright (C) 2014-2023 Oracle and/or its affiliates.
+ *
+ * This file is part of VirtualBox base platform packages, as
+ * available from https://www.virtualbox.org.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, in version 3 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <https://www.gnu.org/licenses>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-only
+ */
+
+/*
+ * Memory allocator
+ * ----------------
+ *
+ * Area [0; AreaSize) contains only the data, control structures are separate.
+ * Block sizes are powers of 2: 32B, ..., 1MB.
+ * The area size can be anything and is initially divided into the largest possible free blocks.
+ *
+ * The entire area is described by a list of 32 bit block descriptors:
+ * * bits 0..3 - order, which is log2 size of the block - 5: 2^(0+5) ... 2^(15+5) == 32B .. 1MB
+ * * bit 4 - 1 for free blocks.
+ * * bits 5..31 - block offset.
+ *
+ * 31 ... 5 | 4 | 3 ... 0
+ * offset F order
+ *
+ * There is a sorted collection of all block descriptors
+ * (key is the block offset, bits 0...4 do not interfere with sorting).
+ * Also there are lists of free blocks for each size for fast allocation.
+ *
+ *
+ * Implementation
+ * --------------
+ *
+ * The blocks collection is a sorted linear list.
+ *
+ * Initially the entire area consists of one or more largest blocks followed by smaller blocks:
+ * * 100B area - 64B block with descriptor: 0x00000011
+ * 32B block with descriptor: 0x00000030
+ * 4B unused
+ * * 64K area - one 64K block with descriptor: 0x0000001C
+ * * 512K area - one 512K block with descriptor: 0x0000001F
+ *
+ * When allocating a new block:
+ * * larger free blocks are split when there is no free block of the requested size;
+ * * smaller free blocks are merged if they can build a requested larger block.
+ */
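+/*
+ * For instance, decoding descriptor 0x00000011 from the 100B example above:
+ * order = 0x11 & 0xF = 1, i.e. a 2^(1+5) = 64B block; the free bit (0x10) is
+ * set; the offset (bits 5..31) is 0.
+ */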
+#include <HGSMIMemAlloc.h>
+#include <HGSMI.h>
+
+#include <VBoxVideoIPRT.h>
+
+/*
+ * We do not want assertions in Linux kernel code, to reduce symbol dependencies.
+ */
+#if defined(IN_RING0) && defined(RT_OS_LINUX)
+# define HGSMI_ASSERT_RETURN(a, b) if (!(a)) return (b)
+# define HGSMI_ASSERT_FAILED() do {} while (0)
+# define HGSMI_ASSERT(expr) do {} while (0)
+#else
+# define HGSMI_ASSERT_RETURN(a, b) AssertReturn(a, b)
+# define HGSMI_ASSERT_FAILED() AssertFailed()
+# define HGSMI_ASSERT(expr) Assert(expr)
+#endif /* !IN_RING0 && RT_OS_LINUX */
+
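+/** Pack a block descriptor from the block offset, the free flag and the order,
+ * using the bit layout described above.
+ */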
+DECLINLINE(HGSMIOFFSET) hgsmiMADescriptor(HGSMIOFFSET off, bool fFree, HGSMIOFFSET order)
+{
+ return (off & HGSMI_MA_DESC_OFFSET_MASK) |
+ (fFree? HGSMI_MA_DESC_FREE_MASK: 0) |
+ (order & HGSMI_MA_DESC_ORDER_MASK);
+}
+
+static void hgsmiMABlockFree(HGSMIMADATA *pMA, HGSMIMABLOCK *pBlock)
+{
+ pMA->env.pfnFree(pMA->env.pvEnv, pBlock);
+}
+
+static int hgsmiMABlockAlloc(HGSMIMADATA *pMA, HGSMIMABLOCK **ppBlock)
+{
+ int rc = VINF_SUCCESS;
+
+ HGSMIMABLOCK *pBlock = (HGSMIMABLOCK *)pMA->env.pfnAlloc(pMA->env.pvEnv, sizeof(HGSMIMABLOCK));
+ if (pBlock)
+ {
+ RT_ZERO(pBlock->nodeBlock);
+ *ppBlock = pBlock;
+ }
+ else
+ {
+ rc = VERR_NO_MEMORY;
+ }
+
+ return rc;
+}
+
+/* Divide the entire area into free blocks. */
+static int hgsmiMAFormat(HGSMIMADATA *pMA)
+{
+ int rc = VINF_SUCCESS;
+
+ /* Initial value, it will be updated in the loop below. */
+ pMA->cbMaxBlock = HGSMI_MA_BLOCK_SIZE_MIN;
+ pMA->cBlocks = 0;
+
+ HGSMISIZE cbBlock = HGSMI_MA_BLOCK_SIZE_MAX;
+ HGSMISIZE cbRemaining = pMA->area.cbArea;
+ HGSMIOFFSET off = 0;
+
+ while (cbBlock >= HGSMI_MA_BLOCK_SIZE_MIN)
+ {
+ /* Build a list of free memory blocks of the current cbBlock size. */
+ uint32_t cBlocks = cbRemaining / cbBlock;
+ if (cBlocks > 0)
+ {
+ if (pMA->cbMaxBlock < cbBlock)
+ {
+ pMA->cbMaxBlock = cbBlock;
+ }
+
+ HGSMIOFFSET order = HGSMIMASize2Order(cbBlock);
+
+ uint32_t i;
+ for (i = 0; i < cBlocks; ++i)
+ {
+ /* A new free block. */
+ HGSMIMABLOCK *pBlock;
+ rc = hgsmiMABlockAlloc(pMA, &pBlock);
+ if (RT_FAILURE(rc))
+ {
+ break;
+ }
+
+ pBlock->descriptor = hgsmiMADescriptor(off, true, order);
+ RTListAppend(&pMA->listBlocks, &pBlock->nodeBlock);
+ ++pMA->cBlocks;
+
+ off += cbBlock;
+ cbRemaining -= cbBlock;
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ {
+ break;
+ }
+
+ cbBlock /= 2;
+ }
+
+ return rc;
+}
+
+static int hgsmiMARebuildFreeLists(HGSMIMADATA *pMA)
+{
+ int rc = VINF_SUCCESS;
+
+ HGSMIMABLOCK *pIter;
+ RTListForEach(&pMA->listBlocks, pIter, HGSMIMABLOCK, nodeBlock)
+ {
+ if (HGSMI_MA_DESC_IS_FREE(pIter->descriptor))
+ {
+ HGSMIOFFSET order = HGSMI_MA_DESC_ORDER(pIter->descriptor);
+ RTListAppend(&pMA->aListFreeBlocks[order], &pIter->nodeFree);
+ }
+ }
+
+ return rc;
+}
+
+static int hgsmiMARestore(HGSMIMADATA *pMA, HGSMIOFFSET *paDescriptors, uint32_t cDescriptors, HGSMISIZE cbMaxBlock)
+{
+ int rc = VINF_SUCCESS;
+
+ pMA->cbMaxBlock = cbMaxBlock;
+ pMA->cBlocks = 0;
+
+ HGSMISIZE cbRemaining = pMA->area.cbArea;
+ HGSMIOFFSET off = 0;
+
+ uint32_t i;
+ for (i = 0; i < cDescriptors; ++i)
+ {
+ /* Verify the descriptor. */
+ HGSMISIZE cbBlock = HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(paDescriptors[i]));
+ if ( off != HGSMI_MA_DESC_OFFSET(paDescriptors[i])
+ || cbBlock > cbRemaining
+ || cbBlock > pMA->cbMaxBlock)
+ {
+ rc = VERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* A new free block. */
+ HGSMIMABLOCK *pBlock;
+ rc = hgsmiMABlockAlloc(pMA, &pBlock);
+ if (RT_FAILURE(rc))
+ {
+ break;
+ }
+
+ pBlock->descriptor = paDescriptors[i];
+ RTListAppend(&pMA->listBlocks, &pBlock->nodeBlock);
+ ++pMA->cBlocks;
+
+ off += cbBlock;
+ cbRemaining -= cbBlock;
+ }
+
+ return rc;
+}
+
+static HGSMIMABLOCK *hgsmiMAGetFreeBlock(HGSMIMADATA *pMA, HGSMIOFFSET order)
+{
+ HGSMIMABLOCK *pBlock = NULL;
+
+ HGSMIOFFSET i;
+ for (i = order; i < RT_ELEMENTS(pMA->aListFreeBlocks); ++i)
+ {
+ pBlock = RTListGetFirst(&pMA->aListFreeBlocks[i], HGSMIMABLOCK, nodeFree);
+ if (pBlock)
+ {
+ break;
+ }
+ }
+
+ if (pBlock)
+ {
+ HGSMI_ASSERT_RETURN(HGSMI_MA_DESC_IS_FREE(pBlock->descriptor), NULL);
+
+ /* Where the block starts. */
+ HGSMIOFFSET off = HGSMI_MA_DESC_OFFSET(pBlock->descriptor);
+
+ /* 'i' is the order of the block. */
+ while (i != order)
+ {
+ /* A larger block was found and needs to be split into 2 smaller blocks. */
+ HGSMIMABLOCK *pBlock2;
+ int rc = hgsmiMABlockAlloc(pMA, &pBlock2);
+ if (RT_FAILURE(rc))
+ {
+ pBlock = NULL;
+ break;
+ }
+
+ /* Create 2 blocks with decreased order. */
+ --i;
+
+ /* Remove from the free list. */
+ RTListNodeRemove(&pBlock->nodeFree);
+
+ pBlock->descriptor = hgsmiMADescriptor(off, true, i);
+ pBlock2->descriptor = hgsmiMADescriptor(off + HGSMIMAOrder2Size(i), true, i);
+
+ /* Update list of all blocks by inserting pBlock2 after pBlock. */
+ RTListNodeInsertAfter(&pBlock->nodeBlock, &pBlock2->nodeBlock);
+ ++pMA->cBlocks;
+
+ /* Update the free list. */
+ RTListAppend(&pMA->aListFreeBlocks[i], &pBlock->nodeFree);
+ RTListAppend(&pMA->aListFreeBlocks[i], &pBlock2->nodeFree);
+ }
+ }
+
+ return pBlock;
+}
+
+static void hgsmiMAReformatFreeBlocks(HGSMIMADATA *pMA, HGSMIOFFSET maxId,
+ HGSMIMABLOCK *pStart, HGSMIMABLOCK *pEnd, HGSMISIZE cbBlocks)
+{
+ int rc = VINF_SUCCESS;
+
+ /*
+ * Blocks starting from pStart until pEnd will be replaced with
+ * another set of blocks.
+ *
+ * The new set will include the block with the required order.
+ * Since the required order is larger than any existing block,
+ * it will replace at least two existing blocks.
+ * The new set will also have minimal possible number of blocks.
+ * Therefore the new set will have at least one block less.
+ * Blocks will be updated in place and remaining blocks will be
+ * deallocated.
+ */
+
+ HGSMISIZE u32BlockSize = HGSMIMAOrder2Size(maxId);
+ HGSMISIZE cbRemaining = cbBlocks;
+ HGSMIOFFSET off = HGSMI_MA_DESC_OFFSET(pStart->descriptor);
+ HGSMIMABLOCK *pBlock = pStart;
+
+ while (u32BlockSize >= HGSMI_MA_BLOCK_SIZE_MIN && cbRemaining)
+ {
+ /* Build a list of free memory blocks with u32BlockSize. */
+ uint32_t cBlocks = cbRemaining / u32BlockSize;
+ if (cBlocks > 0)
+ {
+ HGSMIOFFSET order = HGSMIMASize2Order(u32BlockSize);
+
+ uint32_t i;
+ for (i = 0; i < cBlocks; ++i)
+ {
+ if (pBlock == pEnd)
+ {
+ /* Should never happen because the new set of blocks is supposed to be smaller. */
+ HGSMI_ASSERT_FAILED();
+ rc = VERR_OUT_OF_RESOURCES;
+ break;
+ }
+
+ /* Remove from the free list. */
+ RTListNodeRemove(&pBlock->nodeFree);
+
+ pBlock->descriptor = hgsmiMADescriptor(off, true, order);
+
+ RTListAppend(&pMA->aListFreeBlocks[order], &pBlock->nodeFree);
+
+ off += u32BlockSize;
+ cbRemaining -= u32BlockSize;
+
+ pBlock = RTListGetNext(&pMA->listBlocks, pBlock, HGSMIMABLOCK, nodeBlock);
+ }
+ }
+
+ if (RT_FAILURE(rc))
+ {
+ break;
+ }
+
+ u32BlockSize /= 2;
+ }
+
+ HGSMI_ASSERT(cbRemaining == 0);
+
+ if (RT_SUCCESS(rc))
+ {
+ /* Remove remaining free blocks from pBlock until pEnd */
+ for (;;)
+ {
+ bool fEnd = (pBlock == pEnd);
+ HGSMIMABLOCK *pNext = RTListGetNext(&pMA->listBlocks, pBlock, HGSMIMABLOCK, nodeBlock);
+
+ RTListNodeRemove(&pBlock->nodeFree);
+ RTListNodeRemove(&pBlock->nodeBlock);
+ --pMA->cBlocks;
+
+ hgsmiMABlockFree(pMA, pBlock);
+
+ if (fEnd)
+ {
+ break;
+ }
+
+ pBlock = pNext;
+ }
+ }
+}
+
+static void hgsmiMAQueryFreeRange(HGSMIMADATA *pMA, HGSMIMABLOCK *pBlock, HGSMISIZE cbRequired,
+ HGSMIMABLOCK **ppStart, HGSMIMABLOCK **ppEnd, HGSMISIZE *pcbBlocks)
+{
+ HGSMI_ASSERT(HGSMI_MA_DESC_IS_FREE(pBlock->descriptor));
+
+ *pcbBlocks = HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(pBlock->descriptor));
+ *ppStart = pBlock;
+ *ppEnd = pBlock;
+
+ HGSMIMABLOCK *p;
+ for (;;)
+ {
+ p = RTListGetNext(&pMA->listBlocks, *ppEnd, HGSMIMABLOCK, nodeBlock);
+ if (!p || !HGSMI_MA_DESC_IS_FREE(p->descriptor))
+ {
+ break;
+ }
+ *pcbBlocks += HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(p->descriptor));
+ *ppEnd = p;
+
+ if (cbRequired && *pcbBlocks >= cbRequired)
+ {
+ return;
+ }
+ }
+ for (;;)
+ {
+ p = RTListGetPrev(&pMA->listBlocks, *ppStart, HGSMIMABLOCK, nodeBlock);
+ if (!p || !HGSMI_MA_DESC_IS_FREE(p->descriptor))
+ {
+ break;
+ }
+ *pcbBlocks += HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(p->descriptor));
+ *ppStart = p;
+
+ if (cbRequired && *pcbBlocks >= cbRequired)
+ {
+ return;
+ }
+ }
+}
+
+static void hgsmiMAMergeFreeBlocks(HGSMIMADATA *pMA, HGSMIOFFSET order)
+{
+ /* Try to build a free block of the requested order from smaller free blocks. */
+ if (order == 0)
+ {
+ /* No smaller blocks. */
+ return;
+ }
+
+ HGSMISIZE cbRequired = HGSMIMAOrder2Size(order);
+
+ /* Scan all free lists of smaller blocks.
+ *
+ * Get the sequence of free blocks before and after each free block.
+ * If possible, re-split the sequence to get the required block and other free block(s).
+ * If not possible, try the next free block.
+ *
+ * Free lists are scanned from order - 1 down to 0.
+ */
+ HGSMIOFFSET i = order - 1;
+ for (;;)
+ {
+ HGSMIMABLOCK *pIter;
+ RTListForEach(&pMA->aListFreeBlocks[i], pIter, HGSMIMABLOCK, nodeFree)
+ {
+ HGSMI_ASSERT(HGSMI_MA_DESC_ORDER(pIter->descriptor) == i);
+
+ HGSMISIZE cbBlocks;
+ HGSMIMABLOCK *pFreeStart;
+ HGSMIMABLOCK *pFreeEnd;
+ hgsmiMAQueryFreeRange(pMA, pIter, cbRequired, &pFreeStart, &pFreeEnd, &cbBlocks);
+
+ HGSMI_ASSERT((cbBlocks / HGSMI_MA_BLOCK_SIZE_MIN) * HGSMI_MA_BLOCK_SIZE_MIN == cbBlocks);
+
+ /* Verify whether cbBlocks is enough for the requested block. */
+ if (cbBlocks >= cbRequired)
+ {
+ /* Build new free blocks starting from the requested. */
+ hgsmiMAReformatFreeBlocks(pMA, order, pFreeStart, pFreeEnd, cbBlocks);
+ i = 0; /* Leave the loop. */
+ break;
+ }
+ }
+
+ if (i == 0)
+ {
+ break;
+ }
+
+ --i;
+ }
+}
+
+static HGSMIOFFSET hgsmiMAAlloc(HGSMIMADATA *pMA, HGSMISIZE cb)
+{
+ if (cb > pMA->cbMaxBlock)
+ {
+ return HGSMIOFFSET_VOID;
+ }
+
+ if (cb < HGSMI_MA_BLOCK_SIZE_MIN)
+ {
+ cb = HGSMI_MA_BLOCK_SIZE_MIN;
+ }
+
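+ /* Compute the smallest order whose block size is >= cb: HGSMIPopCnt32(cb - 1)
+ * is the bit length of cb - 1, i.e. ceil(log2(cb)) for cb > 1.
+ */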
+ HGSMIOFFSET order = HGSMIPopCnt32(cb - 1) - HGSMI_MA_DESC_ORDER_BASE;
+
+ HGSMI_ASSERT_RETURN(HGSMIMAOrder2Size(order) >= cb, HGSMIOFFSET_VOID);
+ HGSMI_ASSERT_RETURN(order < RT_ELEMENTS(pMA->aListFreeBlocks), HGSMIOFFSET_VOID);
+
+ HGSMIMABLOCK *pBlock = hgsmiMAGetFreeBlock(pMA, order);
+ if (RT_UNLIKELY(pBlock == NULL))
+ {
+ /* No free block with large enough size. Merge smaller free blocks and try again. */
+ hgsmiMAMergeFreeBlocks(pMA, order);
+ pBlock = hgsmiMAGetFreeBlock(pMA, order);
+ }
+
+ if (RT_LIKELY(pBlock != NULL))
+ {
+ RTListNodeRemove(&pBlock->nodeFree);
+ pBlock->descriptor &= ~HGSMI_MA_DESC_FREE_MASK;
+ return HGSMI_MA_DESC_OFFSET(pBlock->descriptor);
+ }
+
+ return HGSMIOFFSET_VOID;
+}
+
+static void hgsmiMAFree(HGSMIMADATA *pMA, HGSMIOFFSET off)
+{
+ if (off == HGSMIOFFSET_VOID)
+ {
+ return;
+ }
+
+ /* Find the block corresponding to the offset. */
+ HGSMI_ASSERT((off / HGSMI_MA_BLOCK_SIZE_MIN) * HGSMI_MA_BLOCK_SIZE_MIN == off);
+
+ HGSMIMABLOCK *pBlock = HGSMIMASearchOffset(pMA, off);
+ if (pBlock)
+ {
+ if (HGSMI_MA_DESC_OFFSET(pBlock->descriptor) == off)
+ {
+ /* Found the right block, mark it as free. */
+ pBlock->descriptor |= HGSMI_MA_DESC_FREE_MASK;
+ RTListAppend(&pMA->aListFreeBlocks[HGSMI_MA_DESC_ORDER(pBlock->descriptor)], &pBlock->nodeFree);
+ return;
+ }
+ }
+
+ HGSMI_ASSERT_FAILED();
+}
+
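+/** Initialize the allocator for the given area. When cDescriptors is non-zero
+ * the block layout is restored from the saved descriptors, otherwise the area
+ * is formatted from scratch; the free lists are rebuilt in both cases.
+ */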
+int HGSMIMAInit(HGSMIMADATA *pMA, const HGSMIAREA *pArea,
+ HGSMIOFFSET *paDescriptors, uint32_t cDescriptors, HGSMISIZE cbMaxBlock,
+ const HGSMIENV *pEnv)
+{
+ HGSMI_ASSERT_RETURN(pArea->cbArea < UINT32_C(0x80000000), VERR_INVALID_PARAMETER);
+ HGSMI_ASSERT_RETURN(pArea->cbArea >= HGSMI_MA_BLOCK_SIZE_MIN, VERR_INVALID_PARAMETER);
+
+ RT_ZERO(*pMA);
+
+ HGSMISIZE cb = (pArea->cbArea / HGSMI_MA_BLOCK_SIZE_MIN) * HGSMI_MA_BLOCK_SIZE_MIN;
+
+ int rc = HGSMIAreaInitialize(&pMA->area, pArea->pu8Base, cb, 0);
+ if (RT_SUCCESS(rc))
+ {
+ pMA->env = *pEnv;
+
+ uint32_t i;
+ for (i = 0; i < RT_ELEMENTS(pMA->aListFreeBlocks); ++i)
+ {
+ RTListInit(&pMA->aListFreeBlocks[i]);
+ }
+ RTListInit(&pMA->listBlocks);
+
+ if (cDescriptors)
+ {
+ rc = hgsmiMARestore(pMA, paDescriptors, cDescriptors, cbMaxBlock);
+ }
+ else
+ {
+ rc = hgsmiMAFormat(pMA);
+ }
+
+ if (RT_SUCCESS(rc))
+ {
+ rc = hgsmiMARebuildFreeLists(pMA);
+ }
+ }
+
+ return rc;
+}
+
+void HGSMIMAUninit(HGSMIMADATA *pMA)
+{
+ HGSMIMABLOCK *pIter;
+ HGSMIMABLOCK *pNext;
+ /* If it has been initialized. */
+ if (pMA->listBlocks.pNext)
+ {
+ RTListForEachSafe(&pMA->listBlocks, pIter, pNext, HGSMIMABLOCK, nodeBlock)
+ {
+ RTListNodeRemove(&pIter->nodeBlock);
+ hgsmiMABlockFree(pMA, pIter);
+ }
+ }
+
+ RT_ZERO(*pMA);
+}
+
+HGSMIOFFSET HGSMIMAPointerToOffset(const HGSMIMADATA *pMA, const void RT_UNTRUSTED_VOLATILE_GUEST *pv)
+{
+ uintptr_t off = (uintptr_t)pv - (uintptr_t)pMA->area.pu8Base;
+ if (off < pMA->area.cbArea)
+ return pMA->area.offBase + off;
+
+ HGSMI_ASSERT_FAILED();
+ return HGSMIOFFSET_VOID;
+}
+
+static void RT_UNTRUSTED_VOLATILE_HSTGST *HGSMIMAOffsetToPointer(const HGSMIMADATA *pMA, HGSMIOFFSET off)
+{
+ if (HGSMIAreaContainsOffset(&pMA->area, off))
+ {
+ return HGSMIOffsetToPointer(&pMA->area, off);
+ }
+
+ HGSMI_ASSERT_FAILED();
+ return NULL;
+}
+
+void RT_UNTRUSTED_VOLATILE_HSTGST *HGSMIMAAlloc(HGSMIMADATA *pMA, HGSMISIZE cb)
+{
+ HGSMIOFFSET off = hgsmiMAAlloc(pMA, cb);
+ return HGSMIMAOffsetToPointer(pMA, off);
+}
+
+void HGSMIMAFree(HGSMIMADATA *pMA, void RT_UNTRUSTED_VOLATILE_GUEST *pv)
+{
+ HGSMIOFFSET off = HGSMIMAPointerToOffset(pMA, pv);
+ if (off != HGSMIOFFSET_VOID)
+ {
+ hgsmiMAFree(pMA, off);
+ }
+ else
+ {
+ HGSMI_ASSERT_FAILED();
+ }
+}
+
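+/** Binary search in the sorted block list: returns the block with the largest
+ * start offset that does not exceed off; the caller then checks whether it is
+ * an exact match.
+ */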
+HGSMIMABLOCK *HGSMIMASearchOffset(HGSMIMADATA *pMA, HGSMIOFFSET off)
+{
+ /* Binary search in the block list for the offset. */
+ HGSMIMABLOCK *pStart = RTListGetFirst(&pMA->listBlocks, HGSMIMABLOCK, nodeBlock);
+ HGSMIMABLOCK *pEnd = RTListGetLast(&pMA->listBlocks, HGSMIMABLOCK, nodeBlock);
+ HGSMIMABLOCK *pMiddle;
+
+ uint32_t iStart = 0;
+ uint32_t iEnd = pMA->cBlocks;
+ uint32_t iMiddle;
+
+ for (;;)
+ {
+ pMiddle = pStart;
+ iMiddle = iStart + (iEnd - iStart) / 2;
+ if (iMiddle == iStart)
+ {
+ break;
+ }
+
+ /* Find the block with the iMiddle index. Never go further than pEnd. */
+ uint32_t i;
+ for (i = iStart; i < iMiddle && pMiddle != pEnd; ++i)
+ {
+ pMiddle = RTListNodeGetNext(&pMiddle->nodeBlock, HGSMIMABLOCK, nodeBlock);
+ }
+
+ HGSMIOFFSET offMiddle = HGSMI_MA_DESC_OFFSET(pMiddle->descriptor);
+ if (offMiddle > off)
+ {
+ pEnd = pMiddle;
+ iEnd = iMiddle;
+ }
+ else
+ {
+ pStart = pMiddle;
+ iStart = iMiddle;
+ }
+ }
+
+ return pMiddle;
+}
+
+
+/*
+ * Helper.
+ */
+
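+/** Despite the name this is not a population count: it returns the number of
+ * significant bits in u32, i.e. the position of the highest set bit plus one
+ * (0 for u32 == 0). hgsmiMAAlloc uses it to compute the block order.
+ */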
+uint32_t HGSMIPopCnt32(uint32_t u32)
+{
+ uint32_t c = 0;
+ if (u32 > 0xFFFF) { c += 16; u32 >>= 16; }
+ if (u32 > 0xFF) { c += 8; u32 >>= 8; }
+ if (u32 > 0xF) { c += 4; u32 >>= 4; }
+ if (u32 > 0x3) { c += 2; u32 >>= 2; }
+ if (u32 > 0x1) { c += 1; u32 >>= 1; }
+ return c + u32;
+}
diff --git a/src/VBox/GuestHost/HGSMI/Makefile.kmk b/src/VBox/GuestHost/HGSMI/Makefile.kmk
new file mode 100644
index 00000000..51453b07
--- /dev/null
+++ b/src/VBox/GuestHost/HGSMI/Makefile.kmk
@@ -0,0 +1,59 @@
+# $Id: Makefile.kmk $
+## @file
+# Sub-Makefile for the common HGSMI library.
+#
+
+#
+# Copyright (C) 2006-2023 Oracle and/or its affiliates.
+#
+# This file is part of VirtualBox base platform packages, as
+# available from https://www.virtualbox.org.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation, in version 3 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses>.
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+
+SUB_DEPTH = ../../../..
+include $(KBUILD_PATH)/subheader.kmk
+
+
+
+
+#
+# HGSMIGuestR0Lib - For guest additions drivers.
+#
+ifdef VBOX_WITH_ADDITION_DRIVERS
+ LIBRARIES += HGSMIGuestR0Lib
+ HGSMIGuestR0Lib_TEMPLATE = VBoxGuestR0DrvLib
+ HGSMIGuestR0Lib_DEFS =
+ HGSMIGuestR0Lib_INCS = $(VBOX_GRAPHICS_INCS)
+ HGSMIGuestR0Lib_SOURCES = \
+ HGSMICommon.cpp \
+ HGSMIMemAlloc.cpp
+endif
+
+#
+# HGSMIHostR3Lib - For host devices (R3 only).
+#
+LIBRARIES += HGSMIHostR3Lib
+HGSMIHostR3Lib_TEMPLATE = VBoxR3Dll
+HGSMIHostR3Lib_DEFS =
+HGSMIHostR3Lib_INCS = $(VBOX_GRAPHICS_INCS)
+HGSMIHostR3Lib_SOURCES = \
+ HGSMICommon.cpp \
+ HGSMIMemAlloc.cpp
+
+include $(FILE_KBUILD_SUB_FOOTER)
+