Diffstat (limited to 'src/VBox/Runtime/common/alloc')
-rw-r--r--  src/VBox/Runtime/common/alloc/Makefile.kup       0
-rw-r--r--  src/VBox/Runtime/common/alloc/alloc.cpp         73
-rw-r--r--  src/VBox/Runtime/common/alloc/heapoffset.cpp   928
-rw-r--r--  src/VBox/Runtime/common/alloc/heapsimple.cpp   920
-rw-r--r--  src/VBox/Runtime/common/alloc/memcache.cpp     585
-rw-r--r--  src/VBox/Runtime/common/alloc/memtracker.cpp  1349
6 files changed, 3855 insertions, 0 deletions
diff --git a/src/VBox/Runtime/common/alloc/Makefile.kup b/src/VBox/Runtime/common/alloc/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Runtime/common/alloc/Makefile.kup
diff --git a/src/VBox/Runtime/common/alloc/alloc.cpp b/src/VBox/Runtime/common/alloc/alloc.cpp
new file mode 100644
index 00000000..9ac31c5c
--- /dev/null
+++ b/src/VBox/Runtime/common/alloc/alloc.cpp
@@ -0,0 +1,73 @@
+/* $Id: alloc.cpp $ */
+/** @file
+ * IPRT - Memory Allocation.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#ifndef RTMEM_NO_WRAP_TO_EF_APIS
+# define RTMEM_NO_WRAP_TO_EF_APIS
+#endif
+#include <iprt/mem.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+
+
+RTDECL(void *) RTMemDupTag(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemAllocTag(cb, pszTag);
+ if (pvDst)
+ memcpy(pvDst, pvSrc, cb);
+ return pvDst;
+}
+RT_EXPORT_SYMBOL(RTMemDupTag);
+
+
+RTDECL(void *) RTMemDupExTag(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemAllocTag(cbSrc + cbExtra, pszTag);
+ if (pvDst)
+ {
+ memcpy(pvDst, pvSrc, cbSrc);
+ memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
+ }
+ return pvDst;
+}
+RT_EXPORT_SYMBOL(RTMemDupExTag);
+
+
+RTDECL(void *) RTMemReallocZTag(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
+{
+ void *pvDst = RTMemReallocTag(pvOld, cbNew, pszTag);
+ if (pvDst && cbNew > cbOld)
+ memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
+ return pvDst;
+}
+RT_EXPORT_SYMBOL(RTMemReallocZTag);
+
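+/*
+ * Minimal usage sketch (illustrative only, not part of the IPRT sources;
+ * function and tag names are assumptions): duplicating a string and then
+ * growing the copy with a zero-initialized tail using the helpers above.
+ */
+#if 0
+static int exampleDupAndGrow(const char *pszSrc)
+{
+ /* Duplicate the characters plus one zeroed byte for the terminator. */
+ char *psz = (char *)RTMemDupExTag(pszSrc, strlen(pszSrc), 1, "example");
+ if (!psz)
+ return VERR_NO_MEMORY;
+
+ /* Grow to 64 bytes; RTMemReallocZTag zeroes everything past the old size. */
+ char *pszNew = (char *)RTMemReallocZTag(psz, strlen(psz) + 1, 64, "example");
+ if (!pszNew)
+ {
+ RTMemFree(psz);
+ return VERR_NO_MEMORY;
+ }
+
+ RTMemFree(pszNew);
+ return VINF_SUCCESS;
+}
+#endif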
diff --git a/src/VBox/Runtime/common/alloc/heapoffset.cpp b/src/VBox/Runtime/common/alloc/heapoffset.cpp
new file mode 100644
index 00000000..629decbd
--- /dev/null
+++ b/src/VBox/Runtime/common/alloc/heapoffset.cpp
@@ -0,0 +1,928 @@
+/* $Id: heapoffset.cpp $ */
+/** @file
+ * IPRT - An Offset Based Heap.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_DEFAULT
+#include <iprt/heap.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/errcore.h>
+#include <iprt/log.h>
+#include <iprt/param.h>
+#include <iprt/string.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Pointer to the heap anchor block. */
+typedef struct RTHEAPOFFSETINTERNAL *PRTHEAPOFFSETINTERNAL;
+/** Pointer to a heap block. */
+typedef struct RTHEAPOFFSETBLOCK *PRTHEAPOFFSETBLOCK;
+/** Pointer to a free heap block. */
+typedef struct RTHEAPOFFSETFREE *PRTHEAPOFFSETFREE;
+
+/**
+ * Structure describing a block in an offset based heap.
+ *
+ * If this block is allocated, it is followed by the user data.
+ * If this block is free, see RTHEAPOFFSETFREE.
+ */
+typedef struct RTHEAPOFFSETBLOCK
+{
+ /** The next block in the global block list. */
+ uint32_t /*PRTHEAPOFFSETBLOCK*/ offNext;
+ /** The previous block in the global block list. */
+ uint32_t /*PRTHEAPOFFSETBLOCK*/ offPrev;
+ /** Offset into the heap of this block. Used to locate the anchor block. */
+ uint32_t /*PRTHEAPOFFSETINTERNAL*/ offSelf;
+ /** Flags + magic. */
+ uint32_t fFlags;
+} RTHEAPOFFSETBLOCK;
+AssertCompileSize(RTHEAPOFFSETBLOCK, 16);
+
+/** The block is free if this flag is set. When cleared it's allocated. */
+#define RTHEAPOFFSETBLOCK_FLAGS_FREE (RT_BIT_32(0))
+/** The magic value. */
+#define RTHEAPOFFSETBLOCK_FLAGS_MAGIC (UINT32_C(0xabcdef00))
+/** The mask that needs to be applied to RTHEAPOFFSETBLOCK::fFlags to obtain the magic value. */
+#define RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK (~RT_BIT_32(0))
+
+/**
+ * Checks if the specified block is valid or not.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
+ */
+#define RTHEAPOFFSETBLOCK_IS_VALID(pBlock) \
+ ( ((pBlock)->fFlags & RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK) == RTHEAPOFFSETBLOCK_FLAGS_MAGIC )
+
+/**
+ * Checks if the specified block is valid and in use.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
+ */
+#define RTHEAPOFFSETBLOCK_IS_VALID_USED(pBlock) \
+ ( ((pBlock)->fFlags & (RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK | RTHEAPOFFSETBLOCK_FLAGS_FREE)) \
+ == RTHEAPOFFSETBLOCK_FLAGS_MAGIC )
+
+/**
+ * Checks if the specified block is valid and free.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
+ */
+#define RTHEAPOFFSETBLOCK_IS_VALID_FREE(pBlock) \
+ ( ((pBlock)->fFlags & (RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK | RTHEAPOFFSETBLOCK_FLAGS_FREE)) \
+ == (RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE) )
+
+/**
+ * Checks if the specified block is free or not.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a valid RTHEAPOFFSETBLOCK structure.
+ */
+#define RTHEAPOFFSETBLOCK_IS_FREE(pBlock) (!!((pBlock)->fFlags & RTHEAPOFFSETBLOCK_FLAGS_FREE))
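+
+/* Example: with the definitions above, an allocated block carries
+ fFlags == 0xabcdef00 (magic only), while a free block carries
+ fFlags == 0xabcdef01 (magic | RTHEAPOFFSETBLOCK_FLAGS_FREE). */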
+
+/**
+ * A free heap block.
+ * This is an extended version of RTHEAPOFFSETBLOCK that takes the unused
+ * user data to store free list pointers and a cached size value.
+ */
+typedef struct RTHEAPOFFSETFREE
+{
+ /** Core stuff. */
+ RTHEAPOFFSETBLOCK Core;
+ /** Pointer to the next free block. */
+ uint32_t /*PRTHEAPOFFSETFREE*/ offNext;
+ /** Pointer to the previous free block. */
+ uint32_t /*PRTHEAPOFFSETFREE*/ offPrev;
+ /** The size of the block (excluding the RTHEAPOFFSETBLOCK part). */
+ uint32_t cb;
+ /** An alignment filler to make it a multiple of 16 bytes. */
+ uint32_t Alignment;
+} RTHEAPOFFSETFREE;
+AssertCompileSize(RTHEAPOFFSETFREE, 16+16);
+
+
+/**
+ * The heap anchor block.
+ * This structure is placed at the head of the memory block specified to RTHeapOffsetInit(),
+ * which means that the first RTHEAPOFFSETBLOCK appears immediately after this structure.
+ */
+typedef struct RTHEAPOFFSETINTERNAL
+{
+ /** The typical magic (RTHEAPOFFSET_MAGIC). */
+ uint32_t u32Magic;
+ /** The heap size. (This structure is included!) */
+ uint32_t cbHeap;
+ /** The amount of free memory in the heap. */
+ uint32_t cbFree;
+ /** Free head pointer. */
+ uint32_t /*PRTHEAPOFFSETFREE*/ offFreeHead;
+ /** Free tail pointer. */
+ uint32_t /*PRTHEAPOFFSETFREE*/ offFreeTail;
+ /** Make the size of this structure 32 bytes. */
+ uint32_t au32Alignment[3];
+} RTHEAPOFFSETINTERNAL;
+AssertCompileSize(RTHEAPOFFSETINTERNAL, 32);
+
+
+/** The minimum allocation size. */
+#define RTHEAPOFFSET_MIN_BLOCK (sizeof(RTHEAPOFFSETBLOCK))
+AssertCompile(RTHEAPOFFSET_MIN_BLOCK >= sizeof(RTHEAPOFFSETBLOCK));
+AssertCompile(RTHEAPOFFSET_MIN_BLOCK >= sizeof(RTHEAPOFFSETFREE) - sizeof(RTHEAPOFFSETBLOCK));
+
+/** The minimum and default alignment. */
+#define RTHEAPOFFSET_ALIGNMENT (sizeof(RTHEAPOFFSETBLOCK))
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#ifdef RT_STRICT
+# define RTHEAPOFFSET_STRICT 1
+#endif
+
+/**
+ * Converts RTHEAPOFFSETBLOCK::offSelf into a heap anchor block pointer.
+ *
+ * @returns Pointer of given type.
+ * @param pBlock The block to find the heap anchor block for.
+ */
+#define RTHEAPOFF_GET_ANCHOR(pBlock) ( (PRTHEAPOFFSETINTERNAL)((uint8_t *)(pBlock) - (pBlock)->offSelf ) )
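+
+/* Example: the anchor block is 32 bytes (see AssertCompileSize below), so
+ the first block in the heap has offSelf == 32 and RTHEAPOFF_GET_ANCHOR()
+ resolves it by stepping 32 bytes back to the heap start. */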
+
+
+/**
+ * Converts an offset to a pointer.
+ *
+ * All offsets are relative to the heap to make life simple.
+ *
+ * @returns Pointer of given type.
+ * @param pHeapInt Pointer to the heap anchor block.
+ * @param off The offset to convert.
+ * @param type The desired type.
+ */
+#ifdef RTHEAPOFFSET_STRICT
+# define RTHEAPOFF_TO_PTR_N(pHeapInt, off, type) ( (type)rtHeapOffCheckedOffToPtr(pHeapInt, off, true /*fNull*/) )
+#else
+# define RTHEAPOFF_TO_PTR_N(pHeapInt, off, type) ( (type)((off) ? (uint8_t *)(pHeapInt) + (off) : NULL) )
+#endif
+
+/**
+ * Converts an offset to a pointer.
+ *
+ * All offsets are relative to the heap to make life simple.
+ *
+ * @returns Pointer of given type.
+ * @param pHeapInt Pointer to the heap anchor block.
+ * @param off The offset to convert.
+ * @param type The desired type.
+ */
+#ifdef RTHEAPOFFSET_STRICT
+# define RTHEAPOFF_TO_PTR(pHeapInt, off, type) ( (type)rtHeapOffCheckedOffToPtr(pHeapInt, off, false /*fNull*/) )
+#else
+# define RTHEAPOFF_TO_PTR(pHeapInt, off, type) ( (type)((uint8_t *)(pHeapInt) + (off)) )
+#endif
+
+/**
+ * Converts a pointer to an offset.
+ *
+ * All offsets are relative to the heap to make life simple.
+ *
+ * @returns Offset into the heap.
+ * @param pHeapInt Pointer to the heap anchor block.
+ * @param ptr The pointer to convert.
+ */
+#ifdef RTHEAPOFFSET_STRICT
+# define RTHEAPOFF_TO_OFF(pHeapInt, ptr) rtHeapOffCheckedPtrToOff(pHeapInt, ptr)
+#else
+# define RTHEAPOFF_TO_OFF(pHeapInt, ptr) ( (uint32_t)((ptr) ? (uintptr_t)(ptr) - (uintptr_t)(pHeapInt) : UINT32_C(0)) )
+#endif
+
+#define ASSERT_L(a, b) AssertMsg((a) < (b), ("a=%08x b=%08x\n", (a), (b)))
+#define ASSERT_LE(a, b) AssertMsg((a) <= (b), ("a=%08x b=%08x\n", (a), (b)))
+#define ASSERT_G(a, b) AssertMsg((a) > (b), ("a=%08x b=%08x\n", (a), (b)))
+#define ASSERT_GE(a, b) AssertMsg((a) >= (b), ("a=%08x b=%08x\n", (a), (b)))
+#define ASSERT_ALIGN(a) AssertMsg(!((uintptr_t)(a) & (RTHEAPOFFSET_ALIGNMENT - 1)), ("a=%p\n", (uintptr_t)(a)))
+
+#define ASSERT_PREV(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->offPrev); \
+ if ((pBlock)->offPrev) \
+ { \
+ ASSERT_L((pBlock)->offPrev, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
+ ASSERT_GE((pBlock)->offPrev, sizeof(RTHEAPOFFSETINTERNAL)); \
+ } \
+ else \
+ Assert((pBlock) == (PRTHEAPOFFSETBLOCK)((pHeapInt) + 1)); \
+ } while (0)
+
+#define ASSERT_NEXT(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->offNext); \
+ if ((pBlock)->offNext) \
+ { \
+ ASSERT_L((pBlock)->offNext, (pHeapInt)->cbHeap); \
+ ASSERT_G((pBlock)->offNext, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
+ } \
+ } while (0)
+
+#define ASSERT_BLOCK(pHeapInt, pBlock) \
+ do { AssertMsg(RTHEAPOFFSETBLOCK_IS_VALID(pBlock), ("%#x\n", (pBlock)->fFlags)); \
+ AssertMsg(RTHEAPOFF_GET_ANCHOR(pBlock) == (pHeapInt), ("%p != %p\n", RTHEAPOFF_GET_ANCHOR(pBlock), (pHeapInt))); \
+ ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), sizeof(RTHEAPOFFSETINTERNAL)); \
+ ASSERT_L( RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->cbHeap); \
+ ASSERT_NEXT(pHeapInt, pBlock); \
+ ASSERT_PREV(pHeapInt, pBlock); \
+ } while (0)
+
+#define ASSERT_BLOCK_USED(pHeapInt, pBlock) \
+ do { AssertMsg(RTHEAPOFFSETBLOCK_IS_VALID_USED((pBlock)), ("%#x\n", (pBlock)->fFlags)); \
+ AssertMsg(RTHEAPOFF_GET_ANCHOR(pBlock) == (pHeapInt), ("%p != %p\n", RTHEAPOFF_GET_ANCHOR(pBlock), (pHeapInt))); \
+ ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), sizeof(RTHEAPOFFSETINTERNAL)); \
+ ASSERT_L( RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->cbHeap); \
+ ASSERT_NEXT(pHeapInt, pBlock); \
+ ASSERT_PREV(pHeapInt, pBlock); \
+ } while (0)
+
+#define ASSERT_FREE_PREV(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->offPrev); \
+ if ((pBlock)->offPrev) \
+ { \
+ ASSERT_GE((pBlock)->offPrev, (pHeapInt)->offFreeHead); \
+ ASSERT_L((pBlock)->offPrev, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
+ ASSERT_LE((pBlock)->offPrev, (pBlock)->Core.offPrev); \
+ } \
+ else \
+ Assert((pBlock) == RTHEAPOFF_TO_PTR(pHeapInt, (pHeapInt)->offFreeHead, PRTHEAPOFFSETFREE) ); \
+ } while (0)
+
+#define ASSERT_FREE_NEXT(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->offNext); \
+ if ((pBlock)->offNext) \
+ { \
+ ASSERT_LE((pBlock)->offNext, (pHeapInt)->offFreeTail); \
+ ASSERT_G((pBlock)->offNext, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
+ ASSERT_GE((pBlock)->offNext, (pBlock)->Core.offNext); \
+ } \
+ else \
+ Assert((pBlock) == RTHEAPOFF_TO_PTR(pHeapInt, (pHeapInt)->offFreeTail, PRTHEAPOFFSETFREE)); \
+ } while (0)
+
+#ifdef RTHEAPOFFSET_STRICT
+# define ASSERT_FREE_CB(pHeapInt, pBlock) \
+ do { size_t cbCalc = ((pBlock)->Core.offNext ? (pBlock)->Core.offNext : (pHeapInt)->cbHeap) \
+ - RTHEAPOFF_TO_OFF((pHeapInt), (pBlock)) - sizeof(RTHEAPOFFSETBLOCK); \
+ AssertMsg((pBlock)->cb == cbCalc, ("cb=%#zx cbCalc=%#zx\n", (pBlock)->cb, cbCalc)); \
+ } while (0)
+#else
+# define ASSERT_FREE_CB(pHeapInt, pBlock) do {} while (0)
+#endif
+
+/** Asserts that a free block is valid. */
+#define ASSERT_BLOCK_FREE(pHeapInt, pBlock) \
+ do { ASSERT_BLOCK(pHeapInt, &(pBlock)->Core); \
+ Assert(RTHEAPOFFSETBLOCK_IS_VALID_FREE(&(pBlock)->Core)); \
+ ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->offFreeHead); \
+ ASSERT_LE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->offFreeTail); \
+ ASSERT_FREE_NEXT(pHeapInt, pBlock); \
+ ASSERT_FREE_PREV(pHeapInt, pBlock); \
+ ASSERT_FREE_CB(pHeapInt, pBlock); \
+ } while (0)
+
+/** Asserts that the heap anchor block is ok. */
+#define ASSERT_ANCHOR(pHeapInt) \
+ do { AssertPtr(pHeapInt);\
+ Assert((pHeapInt)->u32Magic == RTHEAPOFFSET_MAGIC); \
+ } while (0)
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#ifdef RTHEAPOFFSET_STRICT
+static void rtHeapOffsetAssertAll(PRTHEAPOFFSETINTERNAL pHeapInt);
+#endif
+static PRTHEAPOFFSETBLOCK rtHeapOffsetAllocBlock(PRTHEAPOFFSETINTERNAL pHeapInt, size_t cb, size_t uAlignment);
+static void rtHeapOffsetFreeBlock(PRTHEAPOFFSETINTERNAL pHeapInt, PRTHEAPOFFSETBLOCK pBlock);
+
+#ifdef RTHEAPOFFSET_STRICT
+
+/** Checked version of RTHEAPOFF_TO_PTR and RTHEAPOFF_TO_PTR_N. */
+static void *rtHeapOffCheckedOffToPtr(PRTHEAPOFFSETINTERNAL pHeapInt, uint32_t off, bool fNull)
+{
+ Assert(off || fNull);
+ if (!off)
+ return NULL;
+ AssertMsg(off < pHeapInt->cbHeap, ("%#x %#x\n", off, pHeapInt->cbHeap));
+ AssertMsg(off >= sizeof(*pHeapInt), ("%#x %#x\n", off, sizeof(*pHeapInt)));
+ return (uint8_t *)pHeapInt + off;
+}
+
+/** Checked version of RTHEAPOFF_TO_OFF. */
+static uint32_t rtHeapOffCheckedPtrToOff(PRTHEAPOFFSETINTERNAL pHeapInt, void *pv)
+{
+ if (!pv)
+ return 0;
+ uintptr_t off = (uintptr_t)pv - (uintptr_t)pHeapInt;
+ AssertMsg(off < pHeapInt->cbHeap, ("%#x %#x\n", off, pHeapInt->cbHeap));
+ AssertMsg(off >= sizeof(*pHeapInt), ("%#x %#x\n", off, sizeof(*pHeapInt)));
+ return (uint32_t)off;
+}
+
+#endif /* RTHEAPOFFSET_STRICT */
+
+
+
+RTDECL(int) RTHeapOffsetInit(PRTHEAPOFFSET phHeap, void *pvMemory, size_t cbMemory)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt;
+ PRTHEAPOFFSETFREE pFree;
+ unsigned i;
+
+ /*
+ * Validate input. The imposed minimum heap size is just a convenient value.
+ */
+ AssertReturn(cbMemory >= PAGE_SIZE, VERR_INVALID_PARAMETER);
+ AssertReturn(cbMemory < UINT32_MAX, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pvMemory, VERR_INVALID_POINTER);
+ AssertReturn((uintptr_t)pvMemory + (cbMemory - 1) > (uintptr_t)cbMemory, VERR_INVALID_PARAMETER);
+
+ /*
+ * Place the heap anchor block at the start of the heap memory,
+ * enforcing 32-byte alignment. Also align the heap size correctly.
+ */
+ pHeapInt = (PRTHEAPOFFSETINTERNAL)pvMemory;
+ if ((uintptr_t)pvMemory & 31)
+ {
+ const uintptr_t off = 32 - ((uintptr_t)pvMemory & 31);
+ cbMemory -= off;
+ pHeapInt = (PRTHEAPOFFSETINTERNAL)((uintptr_t)pvMemory + off);
+ }
+ cbMemory &= ~(RTHEAPOFFSET_ALIGNMENT - 1);
+
+
+ /* Init the heap anchor block. */
+ pHeapInt->u32Magic = RTHEAPOFFSET_MAGIC;
+ pHeapInt->cbHeap = (uint32_t)cbMemory;
+ pHeapInt->cbFree = (uint32_t)cbMemory
+ - sizeof(RTHEAPOFFSETBLOCK)
+ - sizeof(RTHEAPOFFSETINTERNAL);
+ pHeapInt->offFreeTail = pHeapInt->offFreeHead = sizeof(*pHeapInt);
+ for (i = 0; i < RT_ELEMENTS(pHeapInt->au32Alignment); i++)
+ pHeapInt->au32Alignment[i] = UINT32_MAX;
+
+ /* Init the single free block. */
+ pFree = RTHEAPOFF_TO_PTR(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE);
+ pFree->Core.offNext = 0;
+ pFree->Core.offPrev = 0;
+ pFree->Core.offSelf = pHeapInt->offFreeHead;
+ pFree->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;
+ pFree->offNext = 0;
+ pFree->offPrev = 0;
+ pFree->cb = pHeapInt->cbFree;
+
+ *phHeap = pHeapInt;
+
+#ifdef RTHEAPOFFSET_STRICT
+ rtHeapOffsetAssertAll(pHeapInt);
+#endif
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetInit);
+
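+/*
+ * Usage sketch (illustrative only; the buffer size and names are
+ * assumptions): carving an offset heap out of a static buffer. Since all
+ * bookkeeping is offset based, the backing memory holds no absolute
+ * pointers and may be mapped at different addresses.
+ */
+#if 0
+static uint8_t s_abHeapMem[8192]; /* must be at least PAGE_SIZE */
+
+static void exampleOffsetHeap(void)
+{
+ RTHEAPOFFSET hHeap;
+ int rc = RTHeapOffsetInit(&hHeap, s_abHeapMem, sizeof(s_abHeapMem));
+ if (RT_SUCCESS(rc))
+ {
+ void *pv = RTHeapOffsetAlloc(hHeap, 128, 64); /* 64-byte aligned */
+ if (pv)
+ RTHeapOffsetFree(hHeap, pv);
+ }
+}
+#endif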
+
+RTDECL(void *) RTHeapOffsetAlloc(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
+ PRTHEAPOFFSETBLOCK pBlock;
+
+ /*
+ * Validate and adjust the input.
+ */
+ AssertPtrReturn(pHeapInt, NULL);
+ if (cb < RTHEAPOFFSET_MIN_BLOCK)
+ cb = RTHEAPOFFSET_MIN_BLOCK;
+ else
+ cb = RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);
+ if (!cbAlignment)
+ cbAlignment = RTHEAPOFFSET_ALIGNMENT;
+ else
+ {
+ Assert(!(cbAlignment & (cbAlignment - 1)));
+ Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
+ if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
+ cbAlignment = RTHEAPOFFSET_ALIGNMENT;
+ }
+
+ /*
+ * Do the allocation.
+ */
+ pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
+ if (RT_LIKELY(pBlock))
+ {
+ void *pv = pBlock + 1;
+ return pv;
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetAlloc);
+
+
+RTDECL(void *) RTHeapOffsetAllocZ(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
+ PRTHEAPOFFSETBLOCK pBlock;
+
+ /*
+ * Validate and adjust the input.
+ */
+ AssertPtrReturn(pHeapInt, NULL);
+ if (cb < RTHEAPOFFSET_MIN_BLOCK)
+ cb = RTHEAPOFFSET_MIN_BLOCK;
+ else
+ cb = RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);
+ if (!cbAlignment)
+ cbAlignment = RTHEAPOFFSET_ALIGNMENT;
+ else
+ {
+ Assert(!(cbAlignment & (cbAlignment - 1)));
+ Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
+ if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
+ cbAlignment = RTHEAPOFFSET_ALIGNMENT;
+ }
+
+ /*
+ * Do the allocation.
+ */
+ pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
+ if (RT_LIKELY(pBlock))
+ {
+ void *pv = pBlock + 1;
+ memset(pv, 0, cb);
+ return pv;
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetAllocZ);
+
+
+/**
+ * Allocates a block of memory from the specified heap.
+ *
+ * No parameter validation or adjustment is performed.
+ *
+ * @returns Pointer to the allocated block.
+ * @returns NULL on failure.
+ *
+ * @param pHeapInt The heap.
+ * @param cb Size of the memory block to allocate.
+ * @param uAlignment The alignment specifications for the allocated block.
+ */
+static PRTHEAPOFFSETBLOCK rtHeapOffsetAllocBlock(PRTHEAPOFFSETINTERNAL pHeapInt, size_t cb, size_t uAlignment)
+{
+ PRTHEAPOFFSETBLOCK pRet = NULL;
+ PRTHEAPOFFSETFREE pFree;
+
+ AssertReturn((pHeapInt)->u32Magic == RTHEAPOFFSET_MAGIC, NULL);
+#ifdef RTHEAPOFFSET_STRICT
+ rtHeapOffsetAssertAll(pHeapInt);
+#endif
+
+ /*
+ * Search for a fitting block from the lower end of the heap.
+ */
+ for (pFree = RTHEAPOFF_TO_PTR_N(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE);
+ pFree;
+ pFree = RTHEAPOFF_TO_PTR_N(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE))
+ {
+ uintptr_t offAlign;
+ ASSERT_BLOCK_FREE(pHeapInt, pFree);
+
+ /*
+ * Match for size and alignment.
+ */
+ if (pFree->cb < cb)
+ continue;
+ offAlign = (uintptr_t)(&pFree->Core + 1) & (uAlignment - 1);
+ if (offAlign)
+ {
+ PRTHEAPOFFSETFREE pPrev;
+
+ offAlign = (uintptr_t)(&pFree[1].Core + 1) & (uAlignment - 1);
+ offAlign = uAlignment - offAlign;
+ if (pFree->cb < cb + offAlign + sizeof(RTHEAPOFFSETFREE))
+ continue;
+
+ /*
+ * Split up the free block into two, so that the 2nd is aligned as
+ * per specification.
+ */
+ pPrev = pFree;
+ pFree = (PRTHEAPOFFSETFREE)((uintptr_t)(pFree + 1) + offAlign);
+ pFree->Core.offPrev = pPrev->Core.offSelf;
+ pFree->Core.offNext = pPrev->Core.offNext;
+ pFree->Core.offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ pFree->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;
+ pFree->offPrev = pPrev->Core.offSelf;
+ pFree->offNext = pPrev->offNext;
+ pFree->cb = (pFree->Core.offNext ? pFree->Core.offNext : pHeapInt->cbHeap)
+ - pFree->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);
+
+ pPrev->Core.offNext = pFree->Core.offSelf;
+ pPrev->offNext = pFree->Core.offSelf;
+ pPrev->cb = pFree->Core.offSelf - pPrev->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);
+
+ if (pFree->Core.offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = pFree->Core.offSelf;
+ if (pFree->offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE)->offPrev = pFree->Core.offSelf;
+ else
+ pHeapInt->offFreeTail = pFree->Core.offSelf;
+
+ pHeapInt->cbFree -= sizeof(RTHEAPOFFSETBLOCK);
+ ASSERT_BLOCK_FREE(pHeapInt, pPrev);
+ ASSERT_BLOCK_FREE(pHeapInt, pFree);
+ }
+
+ /*
+ * Split off a new FREE block?
+ */
+ if (pFree->cb >= cb + RT_ALIGN_Z(sizeof(RTHEAPOFFSETFREE), RTHEAPOFFSET_ALIGNMENT))
+ {
+ /*
+ * Create a new FREE block at the end of this one.
+ */
+ PRTHEAPOFFSETFREE pNew = (PRTHEAPOFFSETFREE)((uintptr_t)&pFree->Core + cb + sizeof(RTHEAPOFFSETBLOCK));
+
+ pNew->Core.offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pNew);
+ pNew->Core.offNext = pFree->Core.offNext;
+ if (pFree->Core.offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = pNew->Core.offSelf;
+ pNew->Core.offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ pNew->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;
+
+ pNew->offNext = pFree->offNext;
+ if (pNew->offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pNew->offNext, PRTHEAPOFFSETFREE)->offPrev = pNew->Core.offSelf;
+ else
+ pHeapInt->offFreeTail = pNew->Core.offSelf;
+ pNew->offPrev = pFree->offPrev;
+ if (pNew->offPrev)
+ RTHEAPOFF_TO_PTR(pHeapInt, pNew->offPrev, PRTHEAPOFFSETFREE)->offNext = pNew->Core.offSelf;
+ else
+ pHeapInt->offFreeHead = pNew->Core.offSelf;
+ pNew->cb = (pNew->Core.offNext ? pNew->Core.offNext : pHeapInt->cbHeap)
+ - pNew->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);
+ ASSERT_BLOCK_FREE(pHeapInt, pNew);
+
+ /*
+ * Adjust and convert the old FREE node into a USED node.
+ */
+ pFree->Core.fFlags &= ~RTHEAPOFFSETBLOCK_FLAGS_FREE;
+ pFree->Core.offNext = pNew->Core.offSelf;
+ pHeapInt->cbFree -= pFree->cb;
+ pHeapInt->cbFree += pNew->cb;
+ pRet = &pFree->Core;
+ ASSERT_BLOCK_USED(pHeapInt, pRet);
+ }
+ else
+ {
+ /*
+ * Link it out of the free list.
+ */
+ if (pFree->offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE)->offPrev = pFree->offPrev;
+ else
+ pHeapInt->offFreeTail = pFree->offPrev;
+ if (pFree->offPrev)
+ RTHEAPOFF_TO_PTR(pHeapInt, pFree->offPrev, PRTHEAPOFFSETFREE)->offNext = pFree->offNext;
+ else
+ pHeapInt->offFreeHead = pFree->offNext;
+
+ /*
+ * Convert it to a used block.
+ */
+ pHeapInt->cbFree -= pFree->cb;
+ pFree->Core.fFlags &= ~RTHEAPOFFSETBLOCK_FLAGS_FREE;
+ pRet = &pFree->Core;
+ ASSERT_BLOCK_USED(pHeapInt, pRet);
+ }
+ break;
+ }
+
+#ifdef RTHEAPOFFSET_STRICT
+ rtHeapOffsetAssertAll(pHeapInt);
+#endif
+ return pRet;
+}
+
+
+RTDECL(void) RTHeapOffsetFree(RTHEAPOFFSET hHeap, void *pv)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt;
+ PRTHEAPOFFSETBLOCK pBlock;
+
+ /*
+ * Validate input.
+ */
+ if (!pv)
+ return;
+ AssertPtr(pv);
+ Assert(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv);
+
+ /*
+ * Get the block and heap. If in strict mode, validate these.
+ */
+ pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
+ pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
+ ASSERT_BLOCK_USED(pHeapInt, pBlock);
+ ASSERT_ANCHOR(pHeapInt);
+ Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap); RT_NOREF_PV(hHeap);
+
+#ifdef RTHEAPOFFSET_FREE_POISON
+ /*
+ * Poison the block.
+ */
+ const size_t cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
+ - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
+ memset(pBlock + 1, RTHEAPOFFSET_FREE_POISON, cbBlock);
+#endif
+
+ /*
+ * Call worker which does the actual job.
+ */
+ rtHeapOffsetFreeBlock(pHeapInt, pBlock);
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetFree);
+
+
+/**
+ * Free a memory block.
+ *
+ * @param pHeapInt The heap.
+ * @param pBlock The memory block to free.
+ */
+static void rtHeapOffsetFreeBlock(PRTHEAPOFFSETINTERNAL pHeapInt, PRTHEAPOFFSETBLOCK pBlock)
+{
+ PRTHEAPOFFSETFREE pFree = (PRTHEAPOFFSETFREE)pBlock;
+ PRTHEAPOFFSETFREE pLeft;
+ PRTHEAPOFFSETFREE pRight;
+
+#ifdef RTHEAPOFFSET_STRICT
+ rtHeapOffsetAssertAll(pHeapInt);
+#endif
+
+ /*
+ * Look for the closest free list blocks by walking the blocks right
+ * of us (both lists are sorted by address).
+ */
+ pLeft = NULL;
+ pRight = NULL;
+ if (pHeapInt->offFreeTail)
+ {
+ pRight = RTHEAPOFF_TO_PTR_N(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETFREE);
+ while (pRight && !RTHEAPOFFSETBLOCK_IS_FREE(&pRight->Core))
+ {
+ ASSERT_BLOCK(pHeapInt, &pRight->Core);
+ pRight = RTHEAPOFF_TO_PTR_N(pHeapInt, pRight->Core.offNext, PRTHEAPOFFSETFREE);
+ }
+ if (!pRight)
+ pLeft = RTHEAPOFF_TO_PTR_N(pHeapInt, pHeapInt->offFreeTail, PRTHEAPOFFSETFREE);
+ else
+ {
+ ASSERT_BLOCK_FREE(pHeapInt, pRight);
+ pLeft = RTHEAPOFF_TO_PTR_N(pHeapInt, pRight->offPrev, PRTHEAPOFFSETFREE);
+ }
+ if (pLeft)
+ ASSERT_BLOCK_FREE(pHeapInt, pLeft);
+ }
+ AssertMsgReturnVoid(pLeft != pFree, ("Freed twice! pv=%p (pBlock=%p)\n", pBlock + 1, pBlock));
+ ASSERT_L(RTHEAPOFF_TO_OFF(pHeapInt, pLeft), RTHEAPOFF_TO_OFF(pHeapInt, pFree));
+ Assert(!pRight || (uintptr_t)pRight > (uintptr_t)pFree);
+ Assert(!pLeft || RTHEAPOFF_TO_PTR_N(pHeapInt, pLeft->offNext, PRTHEAPOFFSETFREE) == pRight);
+
+ /*
+ * Insert at the head of the free block list?
+ */
+ if (!pLeft)
+ {
+ Assert(pRight == RTHEAPOFF_TO_PTR_N(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE));
+ pFree->Core.fFlags |= RTHEAPOFFSETBLOCK_FLAGS_FREE;
+ pFree->offPrev = 0;
+ pFree->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pRight);
+ if (pRight)
+ pRight->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ else
+ pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ pHeapInt->offFreeHead = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ }
+ else
+ {
+ /*
+ * Can we merge with left hand free block?
+ */
+ if (pLeft->Core.offNext == RTHEAPOFF_TO_OFF(pHeapInt, pFree))
+ {
+ pLeft->Core.offNext = pFree->Core.offNext;
+ if (pFree->Core.offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pLeft);
+ pHeapInt->cbFree -= pLeft->cb;
+ pFree = pLeft;
+ }
+ /*
+ * No, just link it into the free list then.
+ */
+ else
+ {
+ pFree->Core.fFlags |= RTHEAPOFFSETBLOCK_FLAGS_FREE;
+ pFree->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pRight);
+ pFree->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pLeft);
+ pLeft->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ if (pRight)
+ pRight->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ else
+ pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ }
+ }
+
+ /*
+ * Can we merge with right hand free block?
+ */
+ if ( pRight
+ && pRight->Core.offPrev == RTHEAPOFF_TO_OFF(pHeapInt, pFree))
+ {
+ /* core */
+ pFree->Core.offNext = pRight->Core.offNext;
+ if (pRight->Core.offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pRight->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+
+ /* free */
+ pFree->offNext = pRight->offNext;
+ if (pRight->offNext)
+ RTHEAPOFF_TO_PTR(pHeapInt, pRight->offNext, PRTHEAPOFFSETFREE)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ else
+ pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
+ pHeapInt->cbFree -= pRight->cb;
+ }
+
+ /*
+ * Calculate the size and update free stats.
+ */
+ pFree->cb = (pFree->Core.offNext ? pFree->Core.offNext : pHeapInt->cbHeap)
+ - RTHEAPOFF_TO_OFF(pHeapInt, pFree) - sizeof(RTHEAPOFFSETBLOCK);
+ pHeapInt->cbFree += pFree->cb;
+ ASSERT_BLOCK_FREE(pHeapInt, pFree);
+
+#ifdef RTHEAPOFFSET_STRICT
+ rtHeapOffsetAssertAll(pHeapInt);
+#endif
+}
+
+
+#ifdef RTHEAPOFFSET_STRICT
+/**
+ * Internal consistency check (relying on assertions).
+ * @param pHeapInt The heap anchor block.
+ */
+static void rtHeapOffsetAssertAll(PRTHEAPOFFSETINTERNAL pHeapInt)
+{
+ PRTHEAPOFFSETFREE pPrev = NULL;
+ PRTHEAPOFFSETFREE pPrevFree = NULL;
+ PRTHEAPOFFSETFREE pBlock;
+ for (pBlock = (PRTHEAPOFFSETFREE)(pHeapInt + 1);
+ pBlock;
+ pBlock = RTHEAPOFF_TO_PTR_N(pHeapInt, pBlock->Core.offNext, PRTHEAPOFFSETFREE))
+ {
+ if (RTHEAPOFFSETBLOCK_IS_FREE(&pBlock->Core))
+ {
+ ASSERT_BLOCK_FREE(pHeapInt, pBlock);
+ Assert(pBlock->offPrev == RTHEAPOFF_TO_OFF(pHeapInt, pPrevFree));
+ Assert(pPrevFree || pHeapInt->offFreeHead == RTHEAPOFF_TO_OFF(pHeapInt, pBlock));
+ pPrevFree = pBlock;
+ }
+ else
+ ASSERT_BLOCK_USED(pHeapInt, &pBlock->Core);
+ Assert(!pPrev || RTHEAPOFF_TO_OFF(pHeapInt, pPrev) == pBlock->Core.offPrev);
+ pPrev = pBlock;
+ }
+ Assert(pHeapInt->offFreeTail == RTHEAPOFF_TO_OFF(pHeapInt, pPrevFree));
+}
+#endif
+
+
+RTDECL(size_t) RTHeapOffsetSize(RTHEAPOFFSET hHeap, void *pv)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt;
+ PRTHEAPOFFSETBLOCK pBlock;
+ size_t cbBlock;
+
+ /*
+ * Validate input.
+ */
+ if (!pv)
+ return 0;
+ AssertPtrReturn(pv, 0);
+ AssertReturn(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv, 0);
+
+ /*
+ * Get the block and heap. If in strict mode, validate these.
+ */
+ pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
+ pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
+ ASSERT_BLOCK_USED(pHeapInt, pBlock);
+ ASSERT_ANCHOR(pHeapInt);
+ Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap); RT_NOREF_PV(hHeap);
+
+ /*
+ * Calculate the block size.
+ */
+ cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
+ - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
+ return cbBlock;
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetSize);
+
+
+RTDECL(size_t) RTHeapOffsetGetHeapSize(RTHEAPOFFSET hHeap)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt;
+
+ if (hHeap == NIL_RTHEAPOFFSET)
+ return 0;
+
+ pHeapInt = hHeap;
+ AssertPtrReturn(pHeapInt, 0);
+ ASSERT_ANCHOR(pHeapInt);
+ return pHeapInt->cbHeap;
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetGetHeapSize);
+
+
+RTDECL(size_t) RTHeapOffsetGetFreeSize(RTHEAPOFFSET hHeap)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt;
+
+ if (hHeap == NIL_RTHEAPOFFSET)
+ return 0;
+
+ pHeapInt = hHeap;
+ AssertPtrReturn(pHeapInt, 0);
+ ASSERT_ANCHOR(pHeapInt);
+ return pHeapInt->cbFree;
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetGetFreeSize);
+
+
+RTDECL(void) RTHeapOffsetDump(RTHEAPOFFSET hHeap, PFNRTHEAPOFFSETPRINTF pfnPrintf)
+{
+ PRTHEAPOFFSETINTERNAL pHeapInt = (PRTHEAPOFFSETINTERNAL)hHeap;
+ PRTHEAPOFFSETFREE pBlock;
+
+ pfnPrintf("**** Dumping Heap %p - cbHeap=%x cbFree=%x ****\n",
+ hHeap, pHeapInt->cbHeap, pHeapInt->cbFree);
+
+ for (pBlock = (PRTHEAPOFFSETFREE)(pHeapInt + 1);
+ pBlock;
+ pBlock = RTHEAPOFF_TO_PTR_N(pHeapInt, pBlock->Core.offNext, PRTHEAPOFFSETFREE))
+ {
+ size_t cb = (pBlock->Core.offNext ? pBlock->Core.offNext : pHeapInt->cbHeap)
+ - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
+ if (RTHEAPOFFSETBLOCK_IS_FREE(&pBlock->Core))
+ pfnPrintf("%p %06x FREE offNext=%06x offPrev=%06x fFlags=%#x cb=%#06x : cb=%#06x offNext=%06x offPrev=%06x\n",
+ pBlock, pBlock->Core.offSelf, pBlock->Core.offNext, pBlock->Core.offPrev, pBlock->Core.fFlags, cb,
+ pBlock->cb, pBlock->offNext, pBlock->offPrev);
+ else
+ pfnPrintf("%p %06x USED offNext=%06x offPrev=%06x fFlags=%#x cb=%#06x\n",
+ pBlock, pBlock->Core.offSelf, pBlock->Core.offNext, pBlock->Core.offPrev, pBlock->Core.fFlags, cb);
+ }
+ pfnPrintf("**** Done dumping Heap %p ****\n", hHeap);
+}
+RT_EXPORT_SYMBOL(RTHeapOffsetDump);
+
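+/*
+ * Dump callback sketch (illustrative only; judging from the calls above,
+ * the PFNRTHEAPOFFSETPRINTF callback is assumed to be printf-like, so
+ * forwarding to RTPrintfV is the simplest implementation).
+ */
+#if 0
+static DECLCALLBACK(void) exampleHeapPrintf(const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ RTPrintfV(pszFormat, va);
+ va_end(va);
+}
+/* ... RTHeapOffsetDump(hHeap, exampleHeapPrintf); ... */
+#endif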
diff --git a/src/VBox/Runtime/common/alloc/heapsimple.cpp b/src/VBox/Runtime/common/alloc/heapsimple.cpp
new file mode 100644
index 00000000..5cb2c85f
--- /dev/null
+++ b/src/VBox/Runtime/common/alloc/heapsimple.cpp
@@ -0,0 +1,920 @@
+/* $Id: heapsimple.cpp $ */
+/** @file
+ * IPRT - A Simple Heap.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP RTLOGGROUP_DEFAULT
+#include <iprt/heap.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/errcore.h>
+#include <iprt/log.h>
+#include <iprt/string.h>
+#include <iprt/param.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Pointer to the heap anchor block. */
+typedef struct RTHEAPSIMPLEINTERNAL *PRTHEAPSIMPLEINTERNAL;
+/** Pointer to a heap block. */
+typedef struct RTHEAPSIMPLEBLOCK *PRTHEAPSIMPLEBLOCK;
+/** Pointer to a free heap block. */
+typedef struct RTHEAPSIMPLEFREE *PRTHEAPSIMPLEFREE;
+
+/**
+ * Structure describing a simple heap block.
+ * If this block is allocated, it is followed by the user data.
+ * If this block is free, see RTHEAPSIMPLEFREE.
+ */
+typedef struct RTHEAPSIMPLEBLOCK
+{
+ /** The next block in the global block list. */
+ PRTHEAPSIMPLEBLOCK pNext;
+ /** The previous block in the global block list. */
+ PRTHEAPSIMPLEBLOCK pPrev;
+ /** Pointer to the heap anchor block. */
+ PRTHEAPSIMPLEINTERNAL pHeap;
+ /** Flags + magic. */
+ uintptr_t fFlags;
+} RTHEAPSIMPLEBLOCK;
+AssertCompileSizeAlignment(RTHEAPSIMPLEBLOCK, 16);
+
+/** The block is free if this flag is set. When cleared it's allocated. */
+#define RTHEAPSIMPLEBLOCK_FLAGS_FREE ((uintptr_t)RT_BIT(0))
+/** The magic value. */
+#define RTHEAPSIMPLEBLOCK_FLAGS_MAGIC ((uintptr_t)0xabcdef00)
+/** The mask that needs to be applied to RTHEAPSIMPLEBLOCK::fFlags to obtain the magic value. */
+#define RTHEAPSIMPLEBLOCK_FLAGS_MAGIC_MASK (~(uintptr_t)RT_BIT(0))
+
+/**
+ * Checks if the specified block is valid or not.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a RTHEAPSIMPLEBLOCK structure.
+ */
+#define RTHEAPSIMPLEBLOCK_IS_VALID(pBlock) \
+ ( ((pBlock)->fFlags & RTHEAPSIMPLEBLOCK_FLAGS_MAGIC_MASK) == RTHEAPSIMPLEBLOCK_FLAGS_MAGIC )
+
+/**
+ * Checks if the specified block is valid and in use.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a RTHEAPSIMPLEBLOCK structure.
+ */
+#define RTHEAPSIMPLEBLOCK_IS_VALID_USED(pBlock) \
+ ( ((pBlock)->fFlags & (RTHEAPSIMPLEBLOCK_FLAGS_MAGIC_MASK | RTHEAPSIMPLEBLOCK_FLAGS_FREE)) \
+ == RTHEAPSIMPLEBLOCK_FLAGS_MAGIC )
+
+/**
+ * Checks if the specified block is valid and free.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a RTHEAPSIMPLEBLOCK structure.
+ */
+#define RTHEAPSIMPLEBLOCK_IS_VALID_FREE(pBlock) \
+ ( ((pBlock)->fFlags & (RTHEAPSIMPLEBLOCK_FLAGS_MAGIC_MASK | RTHEAPSIMPLEBLOCK_FLAGS_FREE)) \
+ == (RTHEAPSIMPLEBLOCK_FLAGS_MAGIC | RTHEAPSIMPLEBLOCK_FLAGS_FREE) )
+
+/**
+ * Checks if the specified block is free or not.
+ * @returns boolean answer.
+ * @param pBlock Pointer to a valid RTHEAPSIMPLEBLOCK structure.
+ */
+#define RTHEAPSIMPLEBLOCK_IS_FREE(pBlock) (!!((pBlock)->fFlags & RTHEAPSIMPLEBLOCK_FLAGS_FREE))
+
+/**
+ * A free heap block.
+ * This is an extended version of RTHEAPSIMPLEBLOCK that takes the unused
+ * user data to store free list pointers and a cached size value.
+ */
+typedef struct RTHEAPSIMPLEFREE
+{
+ /** Core stuff. */
+ RTHEAPSIMPLEBLOCK Core;
+ /** Pointer to the next free block. */
+ PRTHEAPSIMPLEFREE pNext;
+ /** Pointer to the previous free block. */
+ PRTHEAPSIMPLEFREE pPrev;
+ /** The size of the block (excluding the RTHEAPSIMPLEBLOCK part). */
+ size_t cb;
+ /** An alignment filler to make it a multiple of (sizeof(void *) * 2). */
+ size_t Alignment;
+} RTHEAPSIMPLEFREE;
+
+
+/**
+ * The heap anchor block.
+ * This structure is placed at the head of the memory block specified to RTHeapSimpleInit(),
+ * which means that the first RTHEAPSIMPLEBLOCK appears immediately after this structure.
+ */
+typedef struct RTHEAPSIMPLEINTERNAL
+{
+ /** The typical magic (RTHEAPSIMPLE_MAGIC). */
+ size_t uMagic;
+ /** The heap size. (This structure is included!) */
+ size_t cbHeap;
+ /** Pointer to the end of the heap. */
+ void *pvEnd;
+ /** The amount of free memory in the heap. */
+ size_t cbFree;
+ /** Free head pointer. */
+ PRTHEAPSIMPLEFREE pFreeHead;
+ /** Free tail pointer. */
+ PRTHEAPSIMPLEFREE pFreeTail;
+ /** Make the size of this structure a multiple of 32. */
+ size_t auAlignment[2];
+} RTHEAPSIMPLEINTERNAL;
+AssertCompileSizeAlignment(RTHEAPSIMPLEINTERNAL, 32);
+
+
+/** The minimum allocation size. */
+#define RTHEAPSIMPLE_MIN_BLOCK (sizeof(RTHEAPSIMPLEBLOCK))
+AssertCompile(RTHEAPSIMPLE_MIN_BLOCK >= sizeof(RTHEAPSIMPLEBLOCK));
+AssertCompile(RTHEAPSIMPLE_MIN_BLOCK >= sizeof(RTHEAPSIMPLEFREE) - sizeof(RTHEAPSIMPLEBLOCK));
+
+/** The minimum and default alignment. */
+#define RTHEAPSIMPLE_ALIGNMENT (sizeof(RTHEAPSIMPLEBLOCK))
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#ifdef RT_STRICT
+# define RTHEAPSIMPLE_STRICT 1
+#endif
+
+#define ASSERT_L(a, b) AssertMsg((uintptr_t)(a) < (uintptr_t)(b), ("a=%p b=%p\n", (uintptr_t)(a), (uintptr_t)(b)))
+#define ASSERT_LE(a, b) AssertMsg((uintptr_t)(a) <= (uintptr_t)(b), ("a=%p b=%p\n", (uintptr_t)(a), (uintptr_t)(b)))
+#define ASSERT_G(a, b) AssertMsg((uintptr_t)(a) > (uintptr_t)(b), ("a=%p b=%p\n", (uintptr_t)(a), (uintptr_t)(b)))
+#define ASSERT_GE(a, b) AssertMsg((uintptr_t)(a) >= (uintptr_t)(b), ("a=%p b=%p\n", (uintptr_t)(a), (uintptr_t)(b)))
+#define ASSERT_ALIGN(a) AssertMsg(!((uintptr_t)(a) & (RTHEAPSIMPLE_ALIGNMENT - 1)), ("a=%p\n", (uintptr_t)(a)))
+
+#define ASSERT_PREV(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->pPrev); \
+ if ((pBlock)->pPrev) \
+ { \
+ ASSERT_L((pBlock)->pPrev, (pBlock)); \
+ ASSERT_GE((pBlock)->pPrev, (pHeapInt) + 1); \
+ } \
+ else \
+ Assert((pBlock) == (PRTHEAPSIMPLEBLOCK)((pHeapInt) + 1)); \
+ } while (0)
+
+#define ASSERT_NEXT(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->pNext); \
+ if ((pBlock)->pNext) \
+ { \
+ ASSERT_L((pBlock)->pNext, (pHeapInt)->pvEnd); \
+ ASSERT_G((pBlock)->pNext, (pBlock)); \
+ } \
+ } while (0)
+
+#define ASSERT_BLOCK(pHeapInt, pBlock) \
+ do { AssertMsg(RTHEAPSIMPLEBLOCK_IS_VALID(pBlock), ("%#x\n", (pBlock)->fFlags)); \
+ AssertMsg((pBlock)->pHeap == (pHeapInt), ("%p != %p\n", (pBlock)->pHeap, (pHeapInt))); \
+ ASSERT_GE((pBlock), (pHeapInt) + 1); \
+ ASSERT_L((pBlock), (pHeapInt)->pvEnd); \
+ ASSERT_NEXT(pHeapInt, pBlock); \
+ ASSERT_PREV(pHeapInt, pBlock); \
+ } while (0)
+
+#define ASSERT_BLOCK_USED(pHeapInt, pBlock) \
+ do { AssertMsg(RTHEAPSIMPLEBLOCK_IS_VALID_USED((pBlock)), ("%#x\n", (pBlock)->fFlags)); \
+ AssertMsg((pBlock)->pHeap == (pHeapInt), ("%p != %p\n", (pBlock)->pHeap, (pHeapInt))); \
+ ASSERT_GE((pBlock), (pHeapInt) + 1); \
+ ASSERT_L((pBlock), (pHeapInt)->pvEnd); \
+ ASSERT_NEXT(pHeapInt, pBlock); \
+ ASSERT_PREV(pHeapInt, pBlock); \
+ } while (0)
+
+#define ASSERT_FREE_PREV(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->pPrev); \
+ if ((pBlock)->pPrev) \
+ { \
+ ASSERT_GE((pBlock)->pPrev, (pHeapInt)->pFreeHead); \
+ ASSERT_L((pBlock)->pPrev, (pBlock)); \
+ ASSERT_LE((pBlock)->pPrev, (pBlock)->Core.pPrev); \
+ } \
+ else \
+ Assert((pBlock) == (pHeapInt)->pFreeHead); \
+ } while (0)
+
+#define ASSERT_FREE_NEXT(pHeapInt, pBlock) \
+ do { ASSERT_ALIGN((pBlock)->pNext); \
+ if ((pBlock)->pNext) \
+ { \
+ ASSERT_LE((pBlock)->pNext, (pHeapInt)->pFreeTail); \
+ ASSERT_G((pBlock)->pNext, (pBlock)); \
+ ASSERT_GE((pBlock)->pNext, (pBlock)->Core.pNext); \
+ } \
+ else \
+ Assert((pBlock) == (pHeapInt)->pFreeTail); \
+ } while (0)
+
+#ifdef RTHEAPSIMPLE_STRICT
+# define ASSERT_FREE_CB(pHeapInt, pBlock) \
+ do { size_t cbCalc = ((pBlock)->Core.pNext ? (uintptr_t)(pBlock)->Core.pNext : (uintptr_t)(pHeapInt)->pvEnd) \
+ - (uintptr_t)(pBlock) - sizeof(RTHEAPSIMPLEBLOCK); \
+ AssertMsg((pBlock)->cb == cbCalc, ("cb=%#zx cbCalc=%#zx\n", (pBlock)->cb, cbCalc)); \
+ } while (0)
+#else
+# define ASSERT_FREE_CB(pHeapInt, pBlock) do {} while (0)
+#endif
+
+/** Asserts that a free block is valid. */
+#define ASSERT_BLOCK_FREE(pHeapInt, pBlock) \
+ do { ASSERT_BLOCK(pHeapInt, &(pBlock)->Core); \
+ Assert(RTHEAPSIMPLEBLOCK_IS_VALID_FREE(&(pBlock)->Core)); \
+ ASSERT_GE((pBlock), (pHeapInt)->pFreeHead); \
+ ASSERT_LE((pBlock), (pHeapInt)->pFreeTail); \
+ ASSERT_FREE_NEXT(pHeapInt, pBlock); \
+ ASSERT_FREE_PREV(pHeapInt, pBlock); \
+ ASSERT_FREE_CB(pHeapInt, pBlock); \
+ } while (0)
+
+/** Asserts that the heap anchor block is ok. */
+#define ASSERT_ANCHOR(pHeapInt) \
+ do { AssertPtr(pHeapInt);\
+ Assert((pHeapInt)->uMagic == RTHEAPSIMPLE_MAGIC); \
+ } while (0)
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+#ifdef RTHEAPSIMPLE_STRICT
+static void rtHeapSimpleAssertAll(PRTHEAPSIMPLEINTERNAL pHeapInt);
+#endif
+static PRTHEAPSIMPLEBLOCK rtHeapSimpleAllocBlock(PRTHEAPSIMPLEINTERNAL pHeapInt, size_t cb, size_t uAlignment);
+static void rtHeapSimpleFreeBlock(PRTHEAPSIMPLEINTERNAL pHeapInt, PRTHEAPSIMPLEBLOCK pBlock);
+
+
+RTDECL(int) RTHeapSimpleInit(PRTHEAPSIMPLE phHeap, void *pvMemory, size_t cbMemory)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt;
+ PRTHEAPSIMPLEFREE pFree;
+ unsigned i;
+
+ /*
+ * Validate input. The imposed minimum heap size is just a convenient value.
+ */
+ AssertReturn(cbMemory >= PAGE_SIZE, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pvMemory, VERR_INVALID_POINTER);
+ AssertReturn((uintptr_t)pvMemory + (cbMemory - 1) > (uintptr_t)cbMemory, VERR_INVALID_PARAMETER);
+
+ /*
+ * Place the heap anchor block at the start of the heap memory,
+ * enforcing 32-byte alignment. Also align the heap size correctly.
+ */
+ pHeapInt = (PRTHEAPSIMPLEINTERNAL)pvMemory;
+ if ((uintptr_t)pvMemory & 31)
+ {
+ const uintptr_t off = 32 - ((uintptr_t)pvMemory & 31);
+ cbMemory -= off;
+ pHeapInt = (PRTHEAPSIMPLEINTERNAL)((uintptr_t)pvMemory + off);
+ }
+ cbMemory &= ~(RTHEAPSIMPLE_ALIGNMENT - 1);
+
+
+ /* Init the heap anchor block. */
+ pHeapInt->uMagic = RTHEAPSIMPLE_MAGIC;
+ pHeapInt->pvEnd = (uint8_t *)pHeapInt + cbMemory;
+ pHeapInt->cbHeap = cbMemory;
+ pHeapInt->cbFree = cbMemory
+ - sizeof(RTHEAPSIMPLEBLOCK)
+ - sizeof(RTHEAPSIMPLEINTERNAL);
+ pHeapInt->pFreeTail = pHeapInt->pFreeHead = (PRTHEAPSIMPLEFREE)(pHeapInt + 1);
+ for (i = 0; i < RT_ELEMENTS(pHeapInt->auAlignment); i++)
+ pHeapInt->auAlignment[i] = ~(size_t)0;
+
+ /* Init the single free block. */
+ pFree = pHeapInt->pFreeHead;
+ pFree->Core.pNext = NULL;
+ pFree->Core.pPrev = NULL;
+ pFree->Core.pHeap = pHeapInt;
+ pFree->Core.fFlags = RTHEAPSIMPLEBLOCK_FLAGS_MAGIC | RTHEAPSIMPLEBLOCK_FLAGS_FREE;
+ pFree->pNext = NULL;
+ pFree->pPrev = NULL;
+ pFree->cb = pHeapInt->cbFree;
+
+ *phHeap = pHeapInt;
+
+#ifdef RTHEAPSIMPLE_STRICT
+ rtHeapSimpleAssertAll(pHeapInt);
+#endif
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleInit);
+
+
+RTDECL(int) RTHeapSimpleRelocate(RTHEAPSIMPLE hHeap, uintptr_t offDelta)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt = hHeap;
+ PRTHEAPSIMPLEFREE pCur;
+
+ /*
+ * Validate input.
+ */
+ AssertPtrReturn(pHeapInt, VERR_INVALID_HANDLE);
+ AssertReturn(pHeapInt->uMagic == RTHEAPSIMPLE_MAGIC, VERR_INVALID_HANDLE);
+ AssertMsgReturn((uintptr_t)pHeapInt - (uintptr_t)pHeapInt->pvEnd + pHeapInt->cbHeap == offDelta,
+ ("offDelta=%p, expected=%p\n", offDelta, (uintptr_t)pHeapInt->pvEnd - pHeapInt->cbHeap - (uintptr_t)pHeapInt),
+ VERR_INVALID_PARAMETER);
+
+ /*
+ * Relocate the heap anchor block.
+ */
+#define RELOCATE_IT(var, type, offDelta) do { if (RT_UNLIKELY((var) != NULL)) { (var) = (type)((uintptr_t)(var) + offDelta); } } while (0)
+ RELOCATE_IT(pHeapInt->pvEnd, void *, offDelta);
+ RELOCATE_IT(pHeapInt->pFreeHead, PRTHEAPSIMPLEFREE, offDelta);
+ RELOCATE_IT(pHeapInt->pFreeTail, PRTHEAPSIMPLEFREE, offDelta);
+
+ /*
+ * Walk the heap blocks.
+ */
+ for (pCur = (PRTHEAPSIMPLEFREE)(pHeapInt + 1);
+ pCur && (uintptr_t)pCur < (uintptr_t)pHeapInt->pvEnd;
+ pCur = (PRTHEAPSIMPLEFREE)pCur->Core.pNext)
+ {
+ RELOCATE_IT(pCur->Core.pNext, PRTHEAPSIMPLEBLOCK, offDelta);
+ RELOCATE_IT(pCur->Core.pPrev, PRTHEAPSIMPLEBLOCK, offDelta);
+ RELOCATE_IT(pCur->Core.pHeap, PRTHEAPSIMPLEINTERNAL, offDelta);
+ if (RTHEAPSIMPLEBLOCK_IS_FREE(&pCur->Core))
+ {
+ RELOCATE_IT(pCur->pNext, PRTHEAPSIMPLEFREE, offDelta);
+ RELOCATE_IT(pCur->pPrev, PRTHEAPSIMPLEFREE, offDelta);
+ }
+ }
+#undef RELOCATE_IT
+
+#ifdef RTHEAPSIMPLE_STRICT
+ /*
+ * Give it a once over before we return.
+ */
+ rtHeapSimpleAssertAll(pHeapInt);
+#endif
+ return VINF_SUCCESS;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleRelocate);
+
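+/*
+ * Relocation sketch (illustrative only, assuming the backing buffer was
+ * 32-byte aligned so the anchor block sits at its start): after the heap
+ * memory has been moved or remapped, every stored pointer is fixed up by
+ * passing the address delta.
+ */
+#if 0
+static int exampleRelocate(void *pvOld, void *pvNew)
+{
+ RTHEAPSIMPLE hHeap = (RTHEAPSIMPLE)pvNew;
+ return RTHeapSimpleRelocate(hHeap, (uintptr_t)pvNew - (uintptr_t)pvOld);
+}
+#endif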
+
+RTDECL(void *) RTHeapSimpleAlloc(RTHEAPSIMPLE hHeap, size_t cb, size_t cbAlignment)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt = hHeap;
+ PRTHEAPSIMPLEBLOCK pBlock;
+
+ /*
+ * Validate and adjust the input.
+ */
+ AssertPtrReturn(pHeapInt, NULL);
+ if (cb < RTHEAPSIMPLE_MIN_BLOCK)
+ cb = RTHEAPSIMPLE_MIN_BLOCK;
+ else
+ cb = RT_ALIGN_Z(cb, RTHEAPSIMPLE_ALIGNMENT);
+ if (!cbAlignment)
+ cbAlignment = RTHEAPSIMPLE_ALIGNMENT;
+ else
+ {
+ Assert(!(cbAlignment & (cbAlignment - 1)));
+ Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
+ if (cbAlignment < RTHEAPSIMPLE_ALIGNMENT)
+ cbAlignment = RTHEAPSIMPLE_ALIGNMENT;
+ }
+
+ /*
+ * Do the allocation.
+ */
+ pBlock = rtHeapSimpleAllocBlock(pHeapInt, cb, cbAlignment);
+ if (RT_LIKELY(pBlock))
+ {
+ void *pv = pBlock + 1;
+ return pv;
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleAlloc);
+
+
+RTDECL(void *) RTHeapSimpleAllocZ(RTHEAPSIMPLE hHeap, size_t cb, size_t cbAlignment)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt = hHeap;
+ PRTHEAPSIMPLEBLOCK pBlock;
+
+ /*
+ * Validate and adjust the input.
+ */
+ AssertPtrReturn(pHeapInt, NULL);
+ if (cb < RTHEAPSIMPLE_MIN_BLOCK)
+ cb = RTHEAPSIMPLE_MIN_BLOCK;
+ else
+ cb = RT_ALIGN_Z(cb, RTHEAPSIMPLE_ALIGNMENT);
+ if (!cbAlignment)
+ cbAlignment = RTHEAPSIMPLE_ALIGNMENT;
+ else
+ {
+ Assert(!(cbAlignment & (cbAlignment - 1)));
+ Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
+ if (cbAlignment < RTHEAPSIMPLE_ALIGNMENT)
+ cbAlignment = RTHEAPSIMPLE_ALIGNMENT;
+ }
+
+ /*
+ * Do the allocation.
+ */
+ pBlock = rtHeapSimpleAllocBlock(pHeapInt, cb, cbAlignment);
+ if (RT_LIKELY(pBlock))
+ {
+ void *pv = pBlock + 1;
+ memset(pv, 0, cb);
+ return pv;
+ }
+ return NULL;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleAllocZ);
+
+
+/**
+ * Allocates a block of memory from the specified heap.
+ *
+ * No parameter validation or adjustment is performed.
+ *
+ * @returns Pointer to the allocated block.
+ * @returns NULL on failure.
+ *
+ * @param pHeapInt The heap.
+ * @param cb Size of the memory block to allocate.
+ * @param uAlignment The alignment specifications for the allocated block.
+ */
+static PRTHEAPSIMPLEBLOCK rtHeapSimpleAllocBlock(PRTHEAPSIMPLEINTERNAL pHeapInt, size_t cb, size_t uAlignment)
+{
+ PRTHEAPSIMPLEBLOCK pRet = NULL;
+ PRTHEAPSIMPLEFREE pFree;
+
+#ifdef RTHEAPSIMPLE_STRICT
+ rtHeapSimpleAssertAll(pHeapInt);
+#endif
+
+ /*
+ * Search for a fitting block from the lower end of the heap.
+ */
+ for (pFree = pHeapInt->pFreeHead;
+ pFree;
+ pFree = pFree->pNext)
+ {
+ uintptr_t offAlign;
+ ASSERT_BLOCK_FREE(pHeapInt, pFree);
+
+ /*
+ * Match for size and alignment.
+ */
+ if (pFree->cb < cb)
+ continue;
+ offAlign = (uintptr_t)(&pFree->Core + 1) & (uAlignment - 1);
+ if (offAlign)
+ {
+ RTHEAPSIMPLEFREE Free;
+ PRTHEAPSIMPLEBLOCK pPrev;
+
+ offAlign = uAlignment - offAlign;
+ if (pFree->cb - offAlign < cb)
+ continue;
+
+ /*
+ * Make a stack copy of the free block header and adjust the pointer.
+ */
+ Free = *pFree;
+ pFree = (PRTHEAPSIMPLEFREE)((uintptr_t)pFree + offAlign);
+
+ /*
+ * Donate offAlign bytes to the node in front of us.
+ * If we're the head node, we'll have to create a fake node. We'll
+ * mark it USED for simplicity.
+ *
+ * (Should this policy of donating memory to the guy in front of us
+ * cause big 'leaks', we could create a new free node if there is room
+ * for that.)
+ */
+ pPrev = Free.Core.pPrev;
+ if (pPrev)
+ {
+ AssertMsg(!RTHEAPSIMPLEBLOCK_IS_FREE(pPrev), ("Impossible!\n"));
+ pPrev->pNext = &pFree->Core;
+ }
+ else
+ {
+ pPrev = (PRTHEAPSIMPLEBLOCK)(pHeapInt + 1);
+ Assert(pPrev != &pFree->Core);
+ pPrev->pPrev = NULL;
+ pPrev->pNext = &pFree->Core;
+ pPrev->pHeap = pHeapInt;
+ pPrev->fFlags = RTHEAPSIMPLEBLOCK_FLAGS_MAGIC;
+ }
+ pHeapInt->cbFree -= offAlign;
+
+ /*
+ * Recreate pFree in the new position and adjust the neighbors.
+ */
+ *pFree = Free;
+
+ /* the core */
+ if (pFree->Core.pNext)
+ pFree->Core.pNext->pPrev = &pFree->Core;
+ pFree->Core.pPrev = pPrev;
+
+ /* the free part */
+ pFree->cb -= offAlign;
+ if (pFree->pNext)
+ pFree->pNext->pPrev = pFree;
+ else
+ pHeapInt->pFreeTail = pFree;
+ if (pFree->pPrev)
+ pFree->pPrev->pNext = pFree;
+ else
+ pHeapInt->pFreeHead = pFree;
+ ASSERT_BLOCK_FREE(pHeapInt, pFree);
+ ASSERT_BLOCK_USED(pHeapInt, pPrev);
+ }
+
+ /*
+ * Split off a new FREE block?
+ */
+ if (pFree->cb >= cb + RT_ALIGN_Z(sizeof(RTHEAPSIMPLEFREE), RTHEAPSIMPLE_ALIGNMENT))
+ {
+ /*
+ * Move the FREE block up to make room for the new USED block.
+ */
+ PRTHEAPSIMPLEFREE pNew = (PRTHEAPSIMPLEFREE)((uintptr_t)&pFree->Core + cb + sizeof(RTHEAPSIMPLEBLOCK));
+
+ pNew->Core.pNext = pFree->Core.pNext;
+ if (pFree->Core.pNext)
+ pFree->Core.pNext->pPrev = &pNew->Core;
+ pNew->Core.pPrev = &pFree->Core;
+ pNew->Core.pHeap = pHeapInt;
+ pNew->Core.fFlags = RTHEAPSIMPLEBLOCK_FLAGS_MAGIC | RTHEAPSIMPLEBLOCK_FLAGS_FREE;
+
+ pNew->pNext = pFree->pNext;
+ if (pNew->pNext)
+ pNew->pNext->pPrev = pNew;
+ else
+ pHeapInt->pFreeTail = pNew;
+ pNew->pPrev = pFree->pPrev;
+ if (pNew->pPrev)
+ pNew->pPrev->pNext = pNew;
+ else
+ pHeapInt->pFreeHead = pNew;
+ pNew->cb = (pNew->Core.pNext ? (uintptr_t)pNew->Core.pNext : (uintptr_t)pHeapInt->pvEnd)
+ - (uintptr_t)pNew - sizeof(RTHEAPSIMPLEBLOCK);
+ ASSERT_BLOCK_FREE(pHeapInt, pNew);
+
+ /*
+ * Update the old FREE node making it a USED node.
+ */
+ pFree->Core.fFlags &= ~RTHEAPSIMPLEBLOCK_FLAGS_FREE;
+ pFree->Core.pNext = &pNew->Core;
+ pHeapInt->cbFree -= pFree->cb;
+ pHeapInt->cbFree += pNew->cb;
+ pRet = &pFree->Core;
+ ASSERT_BLOCK_USED(pHeapInt, pRet);
+ }
+ else
+ {
+ /*
+ * Link it out of the free list.
+ */
+ if (pFree->pNext)
+ pFree->pNext->pPrev = pFree->pPrev;
+ else
+ pHeapInt->pFreeTail = pFree->pPrev;
+ if (pFree->pPrev)
+ pFree->pPrev->pNext = pFree->pNext;
+ else
+ pHeapInt->pFreeHead = pFree->pNext;
+
+ /*
+ * Convert it to a used block.
+ */
+ pHeapInt->cbFree -= pFree->cb;
+ pFree->Core.fFlags &= ~RTHEAPSIMPLEBLOCK_FLAGS_FREE;
+ pRet = &pFree->Core;
+ ASSERT_BLOCK_USED(pHeapInt, pRet);
+ }
+ break;
+ }
+
+#ifdef RTHEAPSIMPLE_STRICT
+ rtHeapSimpleAssertAll(pHeapInt);
+#endif
+ return pRet;
+}
+
+
+RTDECL(void) RTHeapSimpleFree(RTHEAPSIMPLE hHeap, void *pv)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt;
+ PRTHEAPSIMPLEBLOCK pBlock;
+
+ /*
+ * Validate input.
+ */
+ if (!pv)
+ return;
+ AssertPtr(pv);
+ Assert(RT_ALIGN_P(pv, RTHEAPSIMPLE_ALIGNMENT) == pv);
+
+ /*
+ * Get the block and heap. If in strict mode, validate these.
+ */
+ pBlock = (PRTHEAPSIMPLEBLOCK)pv - 1;
+ pHeapInt = pBlock->pHeap;
+ ASSERT_BLOCK_USED(pHeapInt, pBlock);
+ ASSERT_ANCHOR(pHeapInt);
+ Assert(pHeapInt == (PRTHEAPSIMPLEINTERNAL)hHeap || !hHeap); RT_NOREF_PV(hHeap);
+
+#ifdef RTHEAPSIMPLE_FREE_POISON
+ /*
+ * Poison the block.
+ */
+ const size_t cbBlock = (pBlock->pNext ? (uintptr_t)pBlock->pNext : (uintptr_t)pHeapInt->pvEnd)
+ - (uintptr_t)pBlock - sizeof(RTHEAPSIMPLEBLOCK);
+ memset(pBlock + 1, RTHEAPSIMPLE_FREE_POISON, cbBlock);
+#endif
+
+ /*
+ * Call worker which does the actual job.
+ */
+ rtHeapSimpleFreeBlock(pHeapInt, pBlock);
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleFree);
+
+
+/**
+ * Free a memory block.
+ *
+ * @param pHeapInt The heap.
+ * @param pBlock The memory block to free.
+ */
+static void rtHeapSimpleFreeBlock(PRTHEAPSIMPLEINTERNAL pHeapInt, PRTHEAPSIMPLEBLOCK pBlock)
+{
+ PRTHEAPSIMPLEFREE pFree = (PRTHEAPSIMPLEFREE)pBlock;
+ PRTHEAPSIMPLEFREE pLeft;
+ PRTHEAPSIMPLEFREE pRight;
+
+#ifdef RTHEAPSIMPLE_STRICT
+ rtHeapSimpleAssertAll(pHeapInt);
+#endif
+
+ /*
+ * Look for the closest free list blocks by walking the blocks right
+ * of us (both lists are sorted by address).
+ */
+ pLeft = NULL;
+ pRight = NULL;
+ if (pHeapInt->pFreeTail)
+ {
+ pRight = (PRTHEAPSIMPLEFREE)pFree->Core.pNext;
+ while (pRight && !RTHEAPSIMPLEBLOCK_IS_FREE(&pRight->Core))
+ {
+ ASSERT_BLOCK(pHeapInt, &pRight->Core);
+ pRight = (PRTHEAPSIMPLEFREE)pRight->Core.pNext;
+ }
+ if (!pRight)
+ pLeft = pHeapInt->pFreeTail;
+ else
+ {
+ ASSERT_BLOCK_FREE(pHeapInt, pRight);
+ pLeft = pRight->pPrev;
+ }
+ if (pLeft)
+ ASSERT_BLOCK_FREE(pHeapInt, pLeft);
+ }
+ AssertMsgReturnVoid(pLeft != pFree, ("Freed twice! pv=%p (pBlock=%p)\n", pBlock + 1, pBlock));
+ ASSERT_L(pLeft, pFree);
+ Assert(!pRight || (uintptr_t)pRight > (uintptr_t)pFree);
+ Assert(!pLeft || pLeft->pNext == pRight);
+
+ /*
+ * Insert at the head of the free block list?
+ */
+ if (!pLeft)
+ {
+ Assert(pRight == pHeapInt->pFreeHead);
+ pFree->Core.fFlags |= RTHEAPSIMPLEBLOCK_FLAGS_FREE;
+ pFree->pPrev = NULL;
+ pFree->pNext = pRight;
+ if (pRight)
+ pRight->pPrev = pFree;
+ else
+ pHeapInt->pFreeTail = pFree;
+ pHeapInt->pFreeHead = pFree;
+ }
+ else
+ {
+ /*
+ * Can we merge with left hand free block?
+ */
+ if (pLeft->Core.pNext == &pFree->Core)
+ {
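+            /* Absorb pFree into pLeft: pLeft->cb is subtracted from cbFree
+               here and the recomputed merged size is added back at the end of
+               the function. */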
+ pLeft->Core.pNext = pFree->Core.pNext;
+ if (pFree->Core.pNext)
+ pFree->Core.pNext->pPrev = &pLeft->Core;
+ pHeapInt->cbFree -= pLeft->cb;
+ pFree = pLeft;
+ }
+ /*
+ * No, just link it into the free list then.
+ */
+ else
+ {
+ pFree->Core.fFlags |= RTHEAPSIMPLEBLOCK_FLAGS_FREE;
+ pFree->pNext = pRight;
+ pFree->pPrev = pLeft;
+ pLeft->pNext = pFree;
+ if (pRight)
+ pRight->pPrev = pFree;
+ else
+ pHeapInt->pFreeTail = pFree;
+ }
+ }
+
+ /*
+ * Can we merge with right hand free block?
+ */
+ if ( pRight
+ && pRight->Core.pPrev == &pFree->Core)
+ {
+ /* core */
+ pFree->Core.pNext = pRight->Core.pNext;
+ if (pRight->Core.pNext)
+ pRight->Core.pNext->pPrev = &pFree->Core;
+
+ /* free */
+ pFree->pNext = pRight->pNext;
+ if (pRight->pNext)
+ pRight->pNext->pPrev = pFree;
+ else
+ pHeapInt->pFreeTail = pFree;
+ pHeapInt->cbFree -= pRight->cb;
+ }
+
+ /*
+ * Calculate the size and update free stats.
+ */
+ pFree->cb = (pFree->Core.pNext ? (uintptr_t)pFree->Core.pNext : (uintptr_t)pHeapInt->pvEnd)
+ - (uintptr_t)pFree - sizeof(RTHEAPSIMPLEBLOCK);
+ pHeapInt->cbFree += pFree->cb;
+ ASSERT_BLOCK_FREE(pHeapInt, pFree);
+
+#ifdef RTHEAPSIMPLE_STRICT
+ rtHeapSimpleAssertAll(pHeapInt);
+#endif
+}
+
+
+#ifdef RTHEAPSIMPLE_STRICT
+/**
+ * Internal consistency check (relying on assertions).
+ * @param pHeapInt The heap.
+ */
+static void rtHeapSimpleAssertAll(PRTHEAPSIMPLEINTERNAL pHeapInt)
+{
+ PRTHEAPSIMPLEFREE pPrev = NULL;
+ PRTHEAPSIMPLEFREE pPrevFree = NULL;
+ PRTHEAPSIMPLEFREE pBlock;
+ for (pBlock = (PRTHEAPSIMPLEFREE)(pHeapInt + 1);
+ pBlock;
+ pBlock = (PRTHEAPSIMPLEFREE)pBlock->Core.pNext)
+ {
+ if (RTHEAPSIMPLEBLOCK_IS_FREE(&pBlock->Core))
+ {
+ ASSERT_BLOCK_FREE(pHeapInt, pBlock);
+ Assert(pBlock->pPrev == pPrevFree);
+ Assert(pPrevFree || pHeapInt->pFreeHead == pBlock);
+ pPrevFree = pBlock;
+ }
+ else
+ ASSERT_BLOCK_USED(pHeapInt, &pBlock->Core);
+ Assert(!pPrev || pPrev == (PRTHEAPSIMPLEFREE)pBlock->Core.pPrev);
+ pPrev = pBlock;
+ }
+ Assert(pHeapInt->pFreeTail == pPrevFree);
+}
+#endif
+
+
+RTDECL(size_t) RTHeapSimpleSize(RTHEAPSIMPLE hHeap, void *pv)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt;
+ PRTHEAPSIMPLEBLOCK pBlock;
+ size_t cbBlock;
+
+ /*
+ * Validate input.
+ */
+ if (!pv)
+ return 0;
+ AssertPtrReturn(pv, 0);
+ AssertReturn(RT_ALIGN_P(pv, RTHEAPSIMPLE_ALIGNMENT) == pv, 0);
+
+ /*
+ * Get the block and heap. If in strict mode, validate these.
+ */
+ pBlock = (PRTHEAPSIMPLEBLOCK)pv - 1;
+ pHeapInt = pBlock->pHeap;
+ ASSERT_BLOCK_USED(pHeapInt, pBlock);
+ ASSERT_ANCHOR(pHeapInt);
+ Assert(pHeapInt == (PRTHEAPSIMPLEINTERNAL)hHeap || !hHeap); RT_NOREF_PV(hHeap);
+
+ /*
+ * Calculate the block size.
+ */
+    cbBlock = (pBlock->pNext ? (uintptr_t)pBlock->pNext : (uintptr_t)pHeapInt->pvEnd)
+            - (uintptr_t)pBlock - sizeof(RTHEAPSIMPLEBLOCK);
+ return cbBlock;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleSize);
+
+
+RTDECL(size_t) RTHeapSimpleGetHeapSize(RTHEAPSIMPLE hHeap)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt;
+
+ if (hHeap == NIL_RTHEAPSIMPLE)
+ return 0;
+
+ pHeapInt = hHeap;
+ AssertPtrReturn(pHeapInt, 0);
+ ASSERT_ANCHOR(pHeapInt);
+ return pHeapInt->cbHeap;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleGetHeapSize);
+
+
+RTDECL(size_t) RTHeapSimpleGetFreeSize(RTHEAPSIMPLE hHeap)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt;
+
+ if (hHeap == NIL_RTHEAPSIMPLE)
+ return 0;
+
+ pHeapInt = hHeap;
+ AssertPtrReturn(pHeapInt, 0);
+ ASSERT_ANCHOR(pHeapInt);
+ return pHeapInt->cbFree;
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleGetFreeSize);
+
+
+RTDECL(void) RTHeapSimpleDump(RTHEAPSIMPLE hHeap, PFNRTHEAPSIMPLEPRINTF pfnPrintf)
+{
+ PRTHEAPSIMPLEINTERNAL pHeapInt = (PRTHEAPSIMPLEINTERNAL)hHeap;
+ PRTHEAPSIMPLEFREE pBlock;
+
+ pfnPrintf("**** Dumping Heap %p - cbHeap=%zx cbFree=%zx ****\n",
+ hHeap, pHeapInt->cbHeap, pHeapInt->cbFree);
+
+ for (pBlock = (PRTHEAPSIMPLEFREE)(pHeapInt + 1);
+ pBlock;
+ pBlock = (PRTHEAPSIMPLEFREE)pBlock->Core.pNext)
+ {
+        size_t cb = (pBlock->Core.pNext ? (uintptr_t)pBlock->Core.pNext : (uintptr_t)pHeapInt->pvEnd)
+                  - (uintptr_t)pBlock - sizeof(RTHEAPSIMPLEBLOCK);
+ if (RTHEAPSIMPLEBLOCK_IS_FREE(&pBlock->Core))
+ pfnPrintf("%p %06x FREE pNext=%p pPrev=%p fFlags=%#x cb=%#06x : cb=%#06x pNext=%p pPrev=%p\n",
+ pBlock, (uintptr_t)pBlock - (uintptr_t)(pHeapInt + 1), pBlock->Core.pNext, pBlock->Core.pPrev, pBlock->Core.fFlags, cb,
+ pBlock->cb, pBlock->pNext, pBlock->pPrev);
+ else
+ pfnPrintf("%p %06x USED pNext=%p pPrev=%p fFlags=%#x cb=%#06x\n",
+ pBlock, (uintptr_t)pBlock - (uintptr_t)(pHeapInt + 1), pBlock->Core.pNext, pBlock->Core.pPrev, pBlock->Core.fFlags, cb);
+ }
+ pfnPrintf("**** Done dumping Heap %p ****\n", hHeap);
+}
+RT_EXPORT_SYMBOL(RTHeapSimpleDump);
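+
+/* Example (illustrative sketch): a printf-like callback for RTHeapSimpleDump
+ * forwarding to the debug log; rtExampleHeapPrintf is a stand-in name and
+ * iprt/stdarg.h is assumed to be included.
+ * @code
+ *     static DECLCALLBACK(void) rtExampleHeapPrintf(const char *pszFormat, ...)
+ *     {
+ *         va_list va;
+ *         va_start(va, pszFormat);
+ *         RTLogPrintfV(pszFormat, va);
+ *         va_end(va);
+ *     }
+ *
+ *     RTHeapSimpleDump(hHeap, rtExampleHeapPrintf);
+ * @endcode
+ */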
+
diff --git a/src/VBox/Runtime/common/alloc/memcache.cpp b/src/VBox/Runtime/common/alloc/memcache.cpp
new file mode 100644
index 00000000..3894b7e0
--- /dev/null
+++ b/src/VBox/Runtime/common/alloc/memcache.cpp
@@ -0,0 +1,585 @@
+/* $Id: memcache.cpp $ */
+/** @file
+ * IPRT - Memory Object Allocation Cache.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/memcache.h>
+#include "internal/iprt.h"
+
+#include <iprt/assert.h>
+#include <iprt/asm.h>
+#include <iprt/critsect.h>
+#include <iprt/err.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
+
+#include "internal/magics.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Pointer to a cache instance. */
+typedef struct RTMEMCACHEINT *PRTMEMCACHEINT;
+/** Pointer to a cache page. */
+typedef struct RTMEMCACHEPAGE *PRTMEMCACHEPAGE;
+
+
+
+/**
+ * A free object.
+ *
+ * @remarks This only works if the objects don't have a constructor or
+ * destructor and are big enough.
+ */
+typedef struct RTMEMCACHEFREEOBJ
+{
+ /** Pointer to the next free object */
+ struct RTMEMCACHEFREEOBJ * volatile pNext;
+} RTMEMCACHEFREEOBJ;
+/** Pointer to a free object. */
+typedef RTMEMCACHEFREEOBJ *PRTMEMCACHEFREEOBJ;
+
+
+/**
+ * A cache page.
+ *
+ * This is a page of memory that we split up into a bunch of object-sized
+ * chunks and hand out to the cache users.  The bitmap is updated atomically
+ * so that we don't have to take any locks when freeing or allocating memory.
+ */
+typedef struct RTMEMCACHEPAGE
+{
+ /** Pointer to the cache owning this page.
+ * This is used for validation purposes only. */
+ PRTMEMCACHEINT pCache;
+ /** Pointer to the next page.
+ * This is marked as volatile since we'll be adding new entries to the list
+ * without taking any locks. */
+ PRTMEMCACHEPAGE volatile pNext;
+ /** Bitmap tracking allocated blocks. */
+ void volatile *pbmAlloc;
+    /** Bitmap tracking which blocks have been through the constructor. */
+ void volatile *pbmCtor;
+ /** Pointer to the object array. */
+ uint8_t *pbObjects;
+ /** The number of objects on this page. */
+ uint32_t cObjects;
+
+ /** Padding to force cFree into the next cache line. (ASSUMES CL = 64) */
+ uint8_t abPadding[ARCH_BITS == 32 ? 64 - 6*4 : 64 - 5*8 - 4];
+ /** The number of free objects. */
+ int32_t volatile cFree;
+} RTMEMCACHEPAGE;
+AssertCompileMemberOffset(RTMEMCACHEPAGE, cFree, 64);
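+/* Worked out: on 32-bit hosts the members above occupy 6*4 = 24 bytes (five
+   pointers + cObjects), so abPadding is 40 bytes; on 64-bit hosts they occupy
+   5*8 + 4 = 44 bytes, so abPadding is 20.  Either way cFree lands at offset
+   64, the start of the second cache line, which the AssertCompile verifies. */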
+
+
+/**
+ * Memory object cache instance.
+ */
+typedef struct RTMEMCACHEINT
+{
+ /** Magic value (RTMEMCACHE_MAGIC). */
+ uint32_t u32Magic;
+ /** The object size. */
+ uint32_t cbObject;
+ /** Object alignment. */
+ uint32_t cbAlignment;
+ /** The per page object count. */
+ uint32_t cPerPage;
+ /** Number of bits in the bitmap.
+     * @remarks This is greater than or equal to cPerPage and is aligned such that
+ * the search operation will be most efficient on x86/AMD64. */
+ uint32_t cBits;
+ /** The maximum number of objects. */
+ uint32_t cMax;
+    /** Whether to use the free list or not. */
+ bool fUseFreeList;
+ /** Head of the page list. */
+ PRTMEMCACHEPAGE pPageHead;
+    /** Pointer to the insertion point in the page list. */
+ PRTMEMCACHEPAGE volatile *ppPageNext;
+ /** Constructor callback. */
+ PFNMEMCACHECTOR pfnCtor;
+ /** Destructor callback. */
+ PFNMEMCACHEDTOR pfnDtor;
+ /** Callback argument. */
+ void *pvUser;
+ /** Critical section serializing page allocation and similar. */
+ RTCRITSECT CritSect;
+
+ /** The total object count. */
+ uint32_t volatile cTotal;
+ /** The number of free objects. */
+ int32_t volatile cFree;
+ /** This may point to a page with free entries. */
+ PRTMEMCACHEPAGE volatile pPageHint;
+ /** Stack of free items.
+ * These are marked as used in the allocation bitmaps.
+ *
+ * @todo This doesn't scale well when several threads are beating on the
+ * cache. Also, it totally doesn't work when the objects are too
+ * small. */
+ PRTMEMCACHEFREEOBJ volatile pFreeTop;
+} RTMEMCACHEINT;
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void rtMemCacheFreeList(RTMEMCACHEINT *pThis, PRTMEMCACHEFREEOBJ pHead);
+
+
+RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
+ PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser, uint32_t fFlags)
+{
+ AssertPtr(phMemCache);
+ AssertPtrNull(pfnCtor);
+ AssertPtrNull(pfnDtor);
+ AssertReturn(!pfnDtor || pfnCtor, VERR_INVALID_PARAMETER);
+ AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);
+ AssertReturn(cbObject <= PAGE_SIZE / 8, VERR_INVALID_PARAMETER);
+ AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
+
+ if (cbAlignment == 0)
+ {
+ if (cbObject <= 2)
+ cbAlignment = cbObject;
+ else if (cbObject <= 4)
+ cbAlignment = 4;
+ else if (cbObject <= 8)
+ cbAlignment = 8;
+ else if (cbObject <= 16)
+ cbAlignment = 16;
+ else if (cbObject <= 32)
+ cbAlignment = 32;
+ else
+ cbAlignment = 64;
+ }
+ else
+ {
+ AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
+ AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
+ }
+
+ /*
+ * Allocate and initialize the instance memory.
+ */
+ RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
+ if (!pThis)
+ return VERR_NO_MEMORY;
+ int rc = RTCritSectInit(&pThis->CritSect);
+ if (RT_FAILURE(rc))
+ {
+ RTMemFree(pThis);
+ return rc;
+ }
+
+ pThis->u32Magic = RTMEMCACHE_MAGIC;
+ pThis->cbObject = (uint32_t)RT_ALIGN_Z(cbObject, cbAlignment);
+ pThis->cbAlignment = (uint32_t)cbAlignment;
+ pThis->cPerPage = (uint32_t)((PAGE_SIZE - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject);
+ while ( RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), 8)
+ + pThis->cPerPage * pThis->cbObject
+ + RT_ALIGN(pThis->cPerPage, 64) / 8 * 2
+ > PAGE_SIZE)
+ pThis->cPerPage--;
+ pThis->cBits = RT_ALIGN(pThis->cPerPage, 64);
+ pThis->cMax = cMaxObjects;
+ pThis->fUseFreeList = cbObject >= sizeof(RTMEMCACHEFREEOBJ)
+ && !pfnCtor
+ && !pfnDtor;
+ pThis->pPageHead = NULL;
+ pThis->ppPageNext = &pThis->pPageHead;
+ pThis->pfnCtor = pfnCtor;
+ pThis->pfnDtor = pfnDtor;
+ pThis->pvUser = pvUser;
+ pThis->cTotal = 0;
+ pThis->cFree = 0;
+ pThis->pPageHint = NULL;
+ pThis->pFreeTop = NULL;
+
+ *phMemCache = pThis;
+ return VINF_SUCCESS;
+}
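+
+/* Example (illustrative sketch): a cache of 128 byte objects without
+ * constructor/destructor callbacks; the 1024 object cap is arbitrary.
+ * @code
+ *     RTMEMCACHE hMemCache;
+ *     int rc = RTMemCacheCreate(&hMemCache, 128, 0, 1024, NULL, NULL, NULL, 0);
+ *     if (RT_SUCCESS(rc))
+ *     {
+ *         void *pvObj = RTMemCacheAlloc(hMemCache);
+ *         if (pvObj)
+ *             RTMemCacheFree(hMemCache, pvObj);
+ *         RTMemCacheDestroy(hMemCache);
+ *     }
+ * @endcode
+ */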
+
+
+RTDECL(int) RTMemCacheDestroy(RTMEMCACHE hMemCache)
+{
+ RTMEMCACHEINT *pThis = hMemCache;
+ if (!pThis)
+ return VINF_SUCCESS;
+ AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
+ AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_HANDLE);
+
+#if 0 /*def RT_STRICT - don't require everything to be freed. Caches are very convenient for lazy cleanup. */
+ uint32_t cFree = pThis->cFree;
+ for (PRTMEMCACHEFREEOBJ pFree = pThis->pFreeTop; pFree && cFree < pThis->cTotal + 5; pFree = pFree->pNext)
+ cFree++;
+ AssertMsg(cFree == pThis->cTotal, ("cFree=%u cTotal=%u\n", cFree, pThis->cTotal));
+#endif
+
+ /*
+ * Destroy it.
+ */
+ AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTMEMCACHE_MAGIC_DEAD, RTMEMCACHE_MAGIC), VERR_INVALID_HANDLE);
+ RTCritSectDelete(&pThis->CritSect);
+
+ while (pThis->pPageHead)
+ {
+ PRTMEMCACHEPAGE pPage = pThis->pPageHead;
+ pThis->pPageHead = pPage->pNext;
+ pPage->cFree = 0;
+
+ if (pThis->pfnDtor)
+ {
+ uint32_t iObj = pPage->cObjects;
+ while (iObj-- > 0)
+ if (ASMBitTestAndClear(pPage->pbmCtor, iObj))
+ pThis->pfnDtor(hMemCache, pPage->pbObjects + iObj * pThis->cbObject, pThis->pvUser);
+ }
+
+ RTMemPageFree(pPage, PAGE_SIZE);
+ }
+
+ RTMemFree(pThis);
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Grows the cache.
+ *
+ * @returns IPRT status code.
+ * @param pThis The memory cache instance.
+ */
+static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
+{
+ /*
+ * Enter the critical section here to avoid allocation races leading to
+ * wasted memory (++) and make it easier to link in the new page.
+ */
+ RTCritSectEnter(&pThis->CritSect);
+ int rc = VINF_SUCCESS;
+ if (pThis->cFree < 0)
+ {
+ /*
+ * Allocate and initialize the new page.
+ *
+ * We put the constructor bitmap at the lower end right after cFree.
+ * We then push the object array to the end of the page and place the
+ * allocation bitmap below it. The hope is to increase the chance that
+ * the allocation bitmap is in a different cache line than cFree since
+         * this improves performance markedly when lots of threads are beating
+ * on the cache.
+ */
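+        /* Resulting page layout (illustrative, assuming a 4K page):
+           [RTMEMCACHEPAGE][pbmCtor]...gap...[pbmAlloc][objects to page end] */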
+ PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
+ if (pPage)
+ {
+ uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);
+
+ ASMMemZeroPage(pPage);
+ pPage->pCache = pThis;
+ pPage->pNext = NULL;
+ pPage->cFree = cObjects;
+ pPage->cObjects = cObjects;
+ uint8_t *pb = (uint8_t *)(pPage + 1);
+ pb = RT_ALIGN_PT(pb, 8, uint8_t *);
+ pPage->pbmCtor = pb;
+ pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
+ pPage->pbObjects = pb; Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
+ pb -= pThis->cBits / 8;
+ pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
+ pPage->pbmAlloc = pb;
+ Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);
+
+ /* Mark the bitmap padding and any unused objects as allocated. */
+ for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
+ ASMBitSet(pPage->pbmAlloc, iBit);
+
+ /* Make it the hint. */
+ ASMAtomicWritePtr(&pThis->pPageHint, pPage);
+
+ /* Link the page in at the end of the list. */
+ ASMAtomicWritePtr(pThis->ppPageNext, pPage);
+ pThis->ppPageNext = &pPage->pNext;
+
+ /* Add it to the page counts. */
+ ASMAtomicAddS32(&pThis->cFree, cObjects);
+ ASMAtomicAddU32(&pThis->cTotal, cObjects);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+ RTCritSectLeave(&pThis->CritSect);
+ return rc;
+}
+
+
+/**
+ * Grabs an object in a page.
+ * @returns New cFree value on success (0 or higher), -1 on failure.
+ * @param pPage Pointer to the page.
+ */
+DECL_FORCE_INLINE(int32_t) rtMemCacheGrabObj(PRTMEMCACHEPAGE pPage)
+{
+ if (ASMAtomicUoReadS32(&pPage->cFree) > 0)
+ {
+ int32_t cFreeNew = ASMAtomicDecS32(&pPage->cFree);
+ if (cFreeNew >= 0)
+ return cFreeNew;
+ ASMAtomicIncS32(&pPage->cFree);
+ }
+ return -1;
+}
+
+
+RTDECL(int) RTMemCacheAllocEx(RTMEMCACHE hMemCache, void **ppvObj)
+{
+ RTMEMCACHEINT *pThis = hMemCache;
+ AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
+ AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_PARAMETER);
+
+ /*
+     * Try to grab a free object from the stack.
+ */
+ PRTMEMCACHEFREEOBJ pObj = ASMAtomicUoReadPtrT(&pThis->pFreeTop, PRTMEMCACHEFREEOBJ);
+ if (pObj)
+ {
+ pObj = ASMAtomicXchgPtrT(&pThis->pFreeTop, NULL, PRTMEMCACHEFREEOBJ);
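+        /* The XCHG above detached the entire free stack, so we own every node
+           on it.  We keep the first node and publish the rest again below; any
+           chain pushed by racing threads in between (pAllocRace) gets really
+           freed via rtMemCacheFreeList. */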
+ if (pObj)
+ {
+ if (pObj->pNext)
+ {
+ Assert(pObj->pNext != pObj);
+ PRTMEMCACHEFREEOBJ pAllocRace = ASMAtomicXchgPtrT(&pThis->pFreeTop, pObj->pNext, PRTMEMCACHEFREEOBJ);
+ if (pAllocRace)
+ rtMemCacheFreeList(pThis, pAllocRace);
+ }
+
+ pObj->pNext = NULL;
+ *ppvObj = pObj;
+ return VINF_SUCCESS;
+ }
+ }
+
+ /*
+     * Try to grab a free object at the cache level.
+ */
+ int32_t cNewFree = ASMAtomicDecS32(&pThis->cFree);
+    if (RT_UNLIKELY(cNewFree < 0))
+ {
+ uint32_t cTotal = ASMAtomicUoReadU32(&pThis->cTotal);
+ if ( (uint32_t)(cTotal + -cNewFree) > pThis->cMax
+ || (uint32_t)(cTotal + -cNewFree) <= cTotal)
+ {
+ ASMAtomicIncS32(&pThis->cFree);
+ return VERR_MEM_CACHE_MAX_SIZE;
+ }
+
+ int rc = rtMemCacheGrow(pThis);
+ if (RT_FAILURE(rc))
+ {
+ ASMAtomicIncS32(&pThis->cFree);
+ return rc;
+ }
+ }
+
+ /*
+ * Grab a free object at the page level.
+ */
+ PRTMEMCACHEPAGE pPage = ASMAtomicUoReadPtrT(&pThis->pPageHint, PRTMEMCACHEPAGE);
+ int32_t iObj = pPage ? rtMemCacheGrabObj(pPage) : -1;
+ if (iObj < 0)
+ {
+ for (unsigned cLoops = 0; ; cLoops++)
+ {
+ for (pPage = pThis->pPageHead; pPage; pPage = pPage->pNext)
+ {
+ iObj = rtMemCacheGrabObj(pPage);
+ if (iObj >= 0)
+ {
+ if (iObj > 0)
+ ASMAtomicWritePtr(&pThis->pPageHint, pPage);
+ break;
+ }
+ }
+ if (iObj >= 0)
+ break;
+ Assert(cLoops != 2);
+ Assert(cLoops < 10);
+ }
+ }
+ Assert(iObj >= 0);
+ Assert((uint32_t)iObj < pThis->cMax);
+
+ /*
+ * Find a free object in the allocation bitmap. Use the new cFree count
+ * as a hint.
+ */
+ if (ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
+ {
+ for (unsigned cLoops2 = 0;; cLoops2++)
+ {
+ iObj = ASMBitFirstClear(pPage->pbmAlloc, pThis->cBits);
+ if (RT_LIKELY(iObj >= 0))
+ {
+ if (!ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
+ break;
+ }
+ else
+ ASMMemoryFence();
+ Assert(cLoops2 != 40);
+ }
+ Assert(iObj >= 0);
+ }
+ void *pvObj = &pPage->pbObjects[iObj * pThis->cbObject];
+ Assert((uintptr_t)pvObj - (uintptr_t)pPage < PAGE_SIZE);
+
+ /*
+ * Call the constructor?
+ */
+ if ( pThis->pfnCtor
+ && !ASMAtomicBitTestAndSet(pPage->pbmCtor, iObj))
+ {
+ int rc = pThis->pfnCtor(hMemCache, pvObj, pThis->pvUser);
+ if (RT_FAILURE(rc))
+ {
+ ASMAtomicBitClear(pPage->pbmCtor, iObj);
+ RTMemCacheFree(pThis, pvObj);
+ return rc;
+ }
+ }
+
+ *ppvObj = pvObj;
+ return VINF_SUCCESS;
+}
+
+
+RTDECL(void *) RTMemCacheAlloc(RTMEMCACHE hMemCache)
+{
+ void *pvObj;
+ int rc = RTMemCacheAllocEx(hMemCache, &pvObj);
+ if (RT_SUCCESS(rc))
+ return pvObj;
+ return NULL;
+}
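+
+/* Sketch of constructor/destructor callbacks matching the invocations in this
+ * file (the ctor returns an IPRT status code, the dtor returns nothing);
+ * MYOBJ and its fInUse member are stand-ins for the example.
+ * @code
+ *     static DECLCALLBACK(int) rtExampleCtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
+ *     {
+ *         ((MYOBJ *)pvObj)->fInUse = false;
+ *         return VINF_SUCCESS;
+ *     }
+ *
+ *     static DECLCALLBACK(void) rtExampleDtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
+ *     {
+ *         Assert(!((MYOBJ *)pvObj)->fInUse);
+ *     }
+ * @endcode
+ */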
+
+
+
+/**
+ * Really frees one object.
+ *
+ * @param pThis The memory cache.
+ * @param pvObj The memory object to free.
+ */
+static void rtMemCacheFreeOne(RTMEMCACHEINT *pThis, void *pvObj)
+{
+ /* Note: Do *NOT* attempt to poison the object! */
+
+ /*
+ * Find the cache page. The page structure is at the start of the page.
+ */
+ PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~(uintptr_t)PAGE_OFFSET_MASK);
+ Assert(pPage->pCache == pThis);
+ Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);
+
+ /*
+ * Clear the bitmap bit and update the two object counter. Order matters!
+ */
+ uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
+ uintptr_t iObj = offObj / pThis->cbObject;
+ Assert(iObj * pThis->cbObject == offObj);
+ Assert(iObj < pThis->cPerPage);
+ AssertReturnVoid(ASMAtomicBitTestAndClear(pPage->pbmAlloc, iObj));
+
+ ASMAtomicIncS32(&pPage->cFree);
+ ASMAtomicIncS32(&pThis->cFree);
+}
+
+
+/**
+ * Really frees a list of 'freed' objects.
+ *
+ * @param pThis The memory cache.
+ * @param pHead The head of the list.
+ */
+static void rtMemCacheFreeList(RTMEMCACHEINT *pThis, PRTMEMCACHEFREEOBJ pHead)
+{
+ while (pHead)
+ {
+ PRTMEMCACHEFREEOBJ pFreeMe = pHead;
+ pHead = pHead->pNext;
+ pFreeMe->pNext = NULL;
+ ASMCompilerBarrier();
+ rtMemCacheFreeOne(pThis, pFreeMe);
+ }
+}
+
+
+
+RTDECL(void) RTMemCacheFree(RTMEMCACHE hMemCache, void *pvObj)
+{
+ if (!pvObj)
+ return;
+
+ RTMEMCACHEINT *pThis = hMemCache;
+ AssertPtrReturnVoid(pThis);
+ AssertReturnVoid(pThis->u32Magic == RTMEMCACHE_MAGIC);
+
+ AssertPtr(pvObj);
+ Assert(RT_ALIGN_P(pvObj, pThis->cbAlignment) == pvObj);
+
+ if (!pThis->fUseFreeList)
+ rtMemCacheFreeOne(pThis, pvObj);
+ else
+ {
+# ifdef RT_STRICT
+ /* This is the same as the other branch, except it's not actually freed. */
+ PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~(uintptr_t)PAGE_OFFSET_MASK);
+ Assert(pPage->pCache == pThis);
+ Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);
+ uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
+ uintptr_t iObj = offObj / pThis->cbObject;
+ Assert(iObj * pThis->cbObject == offObj);
+ Assert(iObj < pThis->cPerPage);
+ AssertReturnVoid(ASMBitTest(pPage->pbmAlloc, (int32_t)iObj));
+# endif
+
+ /*
+ * Push it onto the free stack.
+ */
+ PRTMEMCACHEFREEOBJ pObj = (PRTMEMCACHEFREEOBJ)pvObj;
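+        /* Detach the current stack (XCHG to NULL), link it behind our object
+           and publish the combined chain.  Anything pushed by racing threads
+           between the two XCHGs (pFreeRace) is handed to rtMemCacheFreeList
+           for real freeing. */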
+ pObj->pNext = ASMAtomicXchgPtrT(&pThis->pFreeTop, NULL, PRTMEMCACHEFREEOBJ);
+ PRTMEMCACHEFREEOBJ pFreeRace = ASMAtomicXchgPtrT(&pThis->pFreeTop, pObj, PRTMEMCACHEFREEOBJ);
+ if (pFreeRace)
+ rtMemCacheFreeList(pThis, pFreeRace);
+ }
+}
+
diff --git a/src/VBox/Runtime/common/alloc/memtracker.cpp b/src/VBox/Runtime/common/alloc/memtracker.cpp
new file mode 100644
index 00000000..f28177be
--- /dev/null
+++ b/src/VBox/Runtime/common/alloc/memtracker.cpp
@@ -0,0 +1,1349 @@
+/* $Id: memtracker.cpp $ */
+/** @file
+ * IPRT - Memory Tracker & Leak Detector.
+ */
+
+/*
+ * Copyright (C) 2010-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/memtracker.h>
+#include "internal/iprt.h"
+
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/avl.h>
+#include <iprt/critsect.h>
+#ifdef IN_RING3
+# include <iprt/file.h>
+#endif
+#include <iprt/errcore.h>
+#include <iprt/list.h>
+#include <iprt/log.h>
+#include <iprt/mem.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+#include "internal/file.h"
+#include "internal/magics.h"
+#include "internal/strhash.h"
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+/** Pointer to a memory tracker instance */
+typedef struct RTMEMTRACKERINT *PRTMEMTRACKERINT;
+
+/**
+ * Memory tracker statistics.
+ */
+typedef struct RTMEMTRACKERSTATS
+{
+ /** Array of method calls. */
+ uint64_t volatile acMethodCalls[RTMEMTRACKERMETHOD_END];
+ /** The number of times this user freed or reallocated a memory block
+     * originally allocated by someone else. */
+ uint64_t volatile cUserChanges;
+ /** The total number of bytes allocated ever. */
+ uint64_t volatile cbTotalAllocated;
+ /** The total number of blocks allocated ever. */
+ uint64_t volatile cTotalAllocatedBlocks;
+ /** The number of bytes currently allocated. */
+ size_t volatile cbAllocated;
+ /** The number of blocks currently allocated. */
+ size_t volatile cAllocatedBlocks;
+} RTMEMTRACKERSTATS;
+/** Pointer to memory tracker statistics. */
+typedef RTMEMTRACKERSTATS *PRTMEMTRACKERSTATS;
+
+
+/**
+ * Memory tracker user data.
+ */
+typedef struct RTMEMTRACKERUSER
+{
+ /** Entry in the user list (RTMEMTRACKERINT::UserList). */
+ RTLISTNODE ListEntry;
+ /** Pointer to the tracker. */
+ PRTMEMTRACKERINT pTracker;
+ /** Critical section protecting the memory list. */
+ RTCRITSECT CritSect;
+ /** The list of memory allocated by this user (RTMEMTRACKERHDR). */
+ RTLISTANCHOR MemoryList;
+ /** Positive numbers indicates recursion.
+ * Negative numbers are used for the global user since that is shared by
+ * more than one thread. */
+ int32_t volatile cInTracker;
+ /** The user identifier. */
+ uint32_t idUser;
+ /** The statistics for this user. */
+ RTMEMTRACKERSTATS Stats;
+ /** The user (thread) name. */
+ char szName[32];
+} RTMEMTRACKERUSER;
+/** Pointer to memory tracker per user data. */
+typedef RTMEMTRACKERUSER *PRTMEMTRACKERUSER;
+
+
+/**
+ * Memory tracker per tag statistics.
+ */
+typedef struct RTMEMTRACKERTAG
+{
+ /** AVL node core for lookup by hash. */
+ AVLU32NODECORE Core;
+ /** Tag list entry for flat traversal while dumping. */
+ RTLISTNODE ListEntry;
+ /** Pointer to the next tag with the same hash (collisions). */
+ PRTMEMTRACKERTAG pNext;
+ /** The tag statistics. */
+ RTMEMTRACKERSTATS Stats;
+ /** The tag name length. */
+ size_t cchTag;
+ /** The tag string. */
+ char szTag[1];
+} RTMEMTRACKERTAG;
+
+
+/**
+ * The memory tracker instance.
+ */
+typedef struct RTMEMTRACKERINT
+{
+ /** Cross roads semaphore separating dumping and normal operation.
+ * - NS - normal tracking.
+ * - EW - dumping tracking data. */
+ RTSEMXROADS hXRoads;
+
+ /** Critical section protecting the user list and tag database. */
+ RTCRITSECT CritSect;
+ /** List of RTMEMTRACKERUSER records. */
+ RTLISTANCHOR UserList;
+ /** The next user identifier number. */
+ uint32_t idUserNext;
+ /** The TLS index used for the per thread user records. */
+ RTTLS iTls;
+ /** Cross roads semaphore used to protect the tag database.
+ * - NS - lookup.
+ * - EW + critsect - insertion.
+     * @todo Replace this with a read-write semaphore. */
+ RTSEMXROADS hXRoadsTagDb;
+ /** The root of the tag lookup database. */
+ AVLU32TREE TagDbRoot;
+ /** List of RTMEMTRACKERTAG records. */
+ RTLISTANCHOR TagList;
+#if ARCH_BITS == 32
+ /** Alignment padding. */
+ uint32_t u32Alignment;
+#endif
+ /** The global user record (fallback). */
+ RTMEMTRACKERUSER FallbackUser;
+ /** The global statistics. */
+ RTMEMTRACKERSTATS GlobalStats;
+ /** The number of busy (recursive) allocations. */
+ uint64_t volatile cBusyAllocs;
+ /** The number of busy (recursive) frees. */
+ uint64_t volatile cBusyFrees;
+ /** The number of tags. */
+ uint32_t cTags;
+ /** The number of users. */
+ uint32_t cUsers;
+} RTMEMTRACKERINT;
+AssertCompileMemberAlignment(RTMEMTRACKERINT, FallbackUser, 8);
+
+
+/**
+ * Output callback structure.
+ */
+typedef struct RTMEMTRACKEROUTPUT
+{
+ /** The printf like callback. */
+ DECLCALLBACKMEMBER(void, pfnPrintf)(struct RTMEMTRACKEROUTPUT *pThis, const char *pszFormat, ...);
+
+ /** The data. */
+ union
+ {
+ RTFILE hFile;
+ } uData;
+} RTMEMTRACKEROUTPUT;
+/** Pointer to a memory tracker output callback structure. */
+typedef RTMEMTRACKEROUTPUT *PRTMEMTRACKEROUTPUT;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Pointer to the default memory tracker. */
+static PRTMEMTRACKERINT g_pDefaultTracker = NULL;
+
+
+/**
+ * Creates a memory tracker.
+ *
+ * @returns IPRT status code.
+ * @param ppTracker Where to return the tracker instance.
+ */
+static int rtMemTrackerCreate(PRTMEMTRACKERINT *ppTracker)
+{
+ PRTMEMTRACKERINT pTracker = (PRTMEMTRACKERINT)RTMemAllocZ(sizeof(*pTracker));
+ if (!pTracker)
+ return VERR_NO_MEMORY;
+
+ /*
+ * Create locks and stuff.
+ */
+ int rc = RTCritSectInitEx(&pTracker->CritSect,
+ RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
+ NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemXRoadsCreate(&pTracker->hXRoads);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTSemXRoadsCreate(&pTracker->hXRoadsTagDb);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTTlsAllocEx(&pTracker->iTls, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ rc = RTCritSectInitEx(&pTracker->FallbackUser.CritSect,
+ RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
+ NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Initialize the rest of the structure.
+ */
+ RTListInit(&pTracker->UserList);
+ RTListInit(&pTracker->TagList);
+ RTListInit(&pTracker->FallbackUser.ListEntry);
+ RTListInit(&pTracker->FallbackUser.MemoryList);
+ pTracker->FallbackUser.pTracker = pTracker;
+ pTracker->FallbackUser.cInTracker = INT32_MIN / 2;
+ pTracker->FallbackUser.idUser = pTracker->idUserNext++;
+ strcpy(pTracker->FallbackUser.szName, "fallback");
+
+ *ppTracker = pTracker;
+ return VINF_SUCCESS;
+ }
+
+ RTTlsFree(pTracker->iTls);
+ }
+ RTSemXRoadsDestroy(pTracker->hXRoadsTagDb);
+ }
+ RTSemXRoadsDestroy(pTracker->hXRoads);
+ }
+ RTCritSectDelete(&pTracker->CritSect);
+ }
+ return rc;
+}
+
+
+/**
+ * Gets the user record to use.
+ *
+ * @returns Pointer to a user record.
+ * @param pTracker The tracker instance.
+ */
+static PRTMEMTRACKERUSER rtMemTrackerGetUser(PRTMEMTRACKERINT pTracker)
+{
+ /* ASSUMES that RTTlsGet and RTTlsSet will not reenter. */
+ PRTMEMTRACKERUSER pUser = (PRTMEMTRACKERUSER)RTTlsGet(pTracker->iTls);
+ if (RT_UNLIKELY(!pUser))
+ {
+ /*
+ * Is the thread currently initializing or terminating?
+     * If so, don't try to add any user record for it as RTThread may barf or
+ * we might not get the thread name.
+ */
+ if (!RTThreadIsSelfAlive())
+ return &pTracker->FallbackUser;
+
+ /*
+ * Allocate and initialize a new user record for this thread.
+ *
+ * We install the fallback user record while doing the allocation and
+ * locking so that we can deal with recursions.
+ */
+ int rc = RTTlsSet(pTracker->iTls, &pTracker->FallbackUser);
+ if (RT_SUCCESS(rc))
+ {
+ pUser = (PRTMEMTRACKERUSER)RTMemAllocZ(sizeof(*pUser));
+ if (pUser)
+ {
+ rc = RTCritSectInitEx(&pUser->CritSect,
+ RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
+ NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
+ if (RT_SUCCESS(rc))
+ {
+ RTListInit(&pUser->ListEntry);
+ RTListInit(&pUser->MemoryList);
+ pUser->pTracker = pTracker;
+ pUser->cInTracker = 1;
+
+ const char *pszName = RTThreadSelfName();
+ if (pszName)
+ RTStrCopy(pUser->szName, sizeof(pUser->szName), pszName);
+
+ /*
+ * Register the new user record.
+ */
+ rc = RTTlsSet(pTracker->iTls, pUser);
+ if (RT_SUCCESS(rc))
+ {
+ RTCritSectEnter(&pTracker->CritSect);
+
+ pUser->idUser = pTracker->idUserNext++;
+ RTListAppend(&pTracker->UserList, &pUser->ListEntry);
+ pTracker->cUsers++;
+
+ RTCritSectLeave(&pTracker->CritSect);
+ return pUser;
+ }
+
+ RTCritSectDelete(&pUser->CritSect);
+ }
+ RTMemFree(pUser);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ }
+
+        /* Failed, use the fallback. */
+ pUser = &pTracker->FallbackUser;
+ }
+
+ ASMAtomicIncS32(&pUser->cInTracker);
+ return pUser;
+}
+
+
+/**
+ * Counterpart to rtMemTrackerGetUser.
+ *
+ * @param pUser The user record to 'put' back.
+ */
+DECLINLINE(void) rtMemTrackerPutUser(PRTMEMTRACKERUSER pUser)
+{
+ ASMAtomicDecS32(&pUser->cInTracker);
+}
+
+
+/**
+ * Get the tag record corresponding to @a pszTag.
+ *
+ * @returns The tag record. This may be NULL if we're out of memory or
+ * if something goes wrong.
+ *
+ * @param pTracker The tracker instance.
+ * @param pUser The user record of the caller. Must NOT be
+ * NULL. This is used to prevent infinite
+ * recursions when allocating a new tag record.
+ * @param pszTag The tag string. Can be NULL.
+ */
+DECLINLINE(PRTMEMTRACKERTAG) rtMemTrackerGetTag(PRTMEMTRACKERINT pTracker, PRTMEMTRACKERUSER pUser, const char *pszTag)
+{
+ AssertPtr(pTracker);
+ AssertPtr(pUser);
+ if (pUser->cInTracker <= 0)
+ return NULL;
+
+ /*
+ * Hash tag string.
+ */
+ size_t cchTag;
+ uint32_t uHash;
+ if (pszTag)
+ uHash = sdbmN(pszTag, 260, &cchTag);
+ else
+ {
+ pszTag = "";
+ cchTag = 0;
+ uHash = 0;
+ }
+
+ /*
+ * Look up the tag.
+ */
+ RTSemXRoadsNSEnter(pTracker->hXRoadsTagDb);
+ PRTMEMTRACKERTAG pTag = (PRTMEMTRACKERTAG)RTAvlU32Get(&pTracker->TagDbRoot, uHash);
+ while ( pTag
+ && ( pTag->cchTag != cchTag
+ || memcmp(pTag->szTag, pszTag, cchTag)) )
+ pTag = pTag->pNext;
+ RTSemXRoadsNSLeave(pTracker->hXRoadsTagDb);
+
+ /*
+ * Create a new tag record if not found.
+ */
+ if (RT_UNLIKELY(!pTag))
+ {
+ pTag = (PRTMEMTRACKERTAG)RTMemAllocZVar(RT_UOFFSETOF_DYN(RTMEMTRACKERTAG, szTag[cchTag + 1]));
+ if (pTag)
+ {
+ pTag->Core.Key = uHash;
+ pTag->cchTag = cchTag;
+ memcpy(pTag->szTag, pszTag, cchTag + 1);
+
+ RTSemXRoadsEWEnter(pTracker->hXRoadsTagDb);
+ RTCritSectEnter(&pTracker->CritSect);
+
+ void *pvFreeMe = NULL;
+ PRTMEMTRACKERTAG pHeadTag = (PRTMEMTRACKERTAG)RTAvlU32Get(&pTracker->TagDbRoot, uHash);
+ if (!pHeadTag)
+ {
+ RTAvlU32Insert(&pTracker->TagDbRoot, &pTag->Core);
+ RTListAppend(&pTracker->TagList, &pTag->ListEntry);
+ pTracker->cTags++;
+ }
+ else
+ {
+ PRTMEMTRACKERTAG pTag2 = pHeadTag;
+ while ( pTag2
+ && ( pTag2->cchTag != cchTag
+ || memcmp(pTag2->szTag, pszTag, cchTag)) )
+ pTag2 = pTag2->pNext;
+ if (RT_LIKELY(!pTag2))
+ {
+ pTag->pNext = pHeadTag->pNext;
+ pHeadTag->pNext = pTag;
+ RTListAppend(&pTracker->TagList, &pTag->ListEntry);
+ pTracker->cTags++;
+ }
+ else
+ {
+ pvFreeMe = pTag;
+ pTag = pTag2;
+ }
+ }
+
+ RTCritSectLeave(&pTracker->CritSect);
+ RTSemXRoadsEWLeave(pTracker->hXRoadsTagDb);
+
+            if (RT_UNLIKELY(pvFreeMe))
+ RTMemFree(pvFreeMe);
+ }
+ }
+
+ return pTag;
+}
+
+
+/**
+ * Counterpart to rtMemTrackerGetTag.
+ *
+ * @param pTag The tag record to 'put' back.
+ */
+DECLINLINE(void) rtMemTrackerPutTag(PRTMEMTRACKERTAG pTag)
+{
+ NOREF(pTag);
+}
+
+
+/**
+ * Record an allocation call.
+ *
+ * @param pStats The statistics record.
+ * @param cbUser The size of the allocation.
+ * @param enmMethod The allocation method.
+ */
+DECLINLINE(void) rtMemTrackerStateRecordAlloc(PRTMEMTRACKERSTATS pStats, size_t cbUser, RTMEMTRACKERMETHOD enmMethod)
+{
+ ASMAtomicAddU64(&pStats->cbTotalAllocated, cbUser);
+ ASMAtomicIncU64(&pStats->cTotalAllocatedBlocks);
+ ASMAtomicAddZ(&pStats->cbAllocated, cbUser);
+ ASMAtomicIncZ(&pStats->cAllocatedBlocks);
+ ASMAtomicIncU64(&pStats->acMethodCalls[enmMethod]);
+}
+
+
+/**
+ * Record a free call.
+ *
+ * @param pStats The statistics record.
+ * @param cbUser The size of the allocation.
+ * @param enmMethod The free method.
+ */
+DECLINLINE(void) rtMemTrackerStateRecordFree(PRTMEMTRACKERSTATS pStats, size_t cbUser, RTMEMTRACKERMETHOD enmMethod)
+{
+ ASMAtomicSubZ(&pStats->cbAllocated, cbUser);
+ ASMAtomicDecZ(&pStats->cAllocatedBlocks);
+ ASMAtomicIncU64(&pStats->acMethodCalls[enmMethod]);
+}
+
+
+/**
+ * Internal RTMemTrackerHdrAlloc and RTMemTrackerHdrAllocEx worker.
+ *
+ * @returns Pointer to the user data allocation.
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pv The pointer to the allocated memory. This
+ * includes room for the header.
+ * @param cbUser The size requested by the user.
+ * @param pszTag The tag string.
+ * @param pvCaller The return address.
+ * @param enmMethod The allocation method.
+ */
+static void *rtMemTrackerHdrAllocEx(PRTMEMTRACKERINT pTracker, void *pv, size_t cbUser,
+ const char *pszTag, void *pvCaller, RTMEMTRACKERMETHOD enmMethod)
+{
+ /*
+ * Check input.
+ */
+ if (!pv)
+ return NULL;
+ AssertReturn(enmMethod > RTMEMTRACKERMETHOD_INVALID && enmMethod < RTMEMTRACKERMETHOD_END, NULL);
+
+ /*
+ * Initialize the header.
+ */
+ PRTMEMTRACKERHDR pHdr = (PRTMEMTRACKERHDR)pv;
+
+ pHdr->uMagic = RTMEMTRACKERHDR_MAGIC;
+ pHdr->cbUser = cbUser;
+ RTListInit(&pHdr->ListEntry);
+ pHdr->pUser = NULL;
+ pHdr->pszTag = pszTag;
+ pHdr->pTag = NULL;
+ pHdr->pvCaller = pvCaller;
+ pHdr->pvUser = pHdr + 1;
+ pHdr->uReserved = 0;
+
+ /*
+ * Add it to the tracker if we've got one.
+ */
+ if (pTracker)
+ {
+ PRTMEMTRACKERUSER pUser = rtMemTrackerGetUser(pTracker);
+ if (pUser->cInTracker == 1)
+ {
+ RTSemXRoadsNSEnter(pTracker->hXRoads);
+
+            /* Get the tag and update its statistics. */
+ PRTMEMTRACKERTAG pTag = rtMemTrackerGetTag(pTracker, pUser, pszTag);
+ if (pTag)
+ {
+ pHdr->pTag = pTag;
+ rtMemTrackerStateRecordAlloc(&pTag->Stats, cbUser, enmMethod);
+ rtMemTrackerPutTag(pTag);
+ }
+
+ /* Link the header and update the user statistics. */
+ RTCritSectEnter(&pUser->CritSect);
+ RTListAppend(&pUser->MemoryList, &pHdr->ListEntry);
+ RTCritSectLeave(&pUser->CritSect);
+
+ pHdr->pUser = pUser;
+ rtMemTrackerStateRecordAlloc(&pUser->Stats, cbUser, enmMethod);
+
+ /* Update the global statistics. */
+ rtMemTrackerStateRecordAlloc(&pTracker->GlobalStats, cbUser, enmMethod);
+
+ RTSemXRoadsNSLeave(pTracker->hXRoads);
+ }
+ else
+ ASMAtomicIncU64(&pTracker->cBusyAllocs);
+ rtMemTrackerPutUser(pUser);
+ }
+
+ return pHdr + 1;
+}
+
+
+/**
+ * Internal worker for rtMemTrackerHdrFreeEx and rtMemTrackerHdrReallocPrep.
+ *
+ * @returns Pointer to the original block.
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pvUser Pointer to the user memory.
+ * @param cbUser The size of the user memory or 0.
+ * @param pszTag The tag to associate the free with.
+ * @param pvCaller The return address.
+ * @param enmMethod The free method.
+ * @param uDeadMagic The dead magic value to use.
+ */
+static void *rtMemTrackerHdrFreeCommon(PRTMEMTRACKERINT pTracker, void *pvUser, size_t cbUser,
+ const char *pszTag, void *pvCaller, RTMEMTRACKERMETHOD enmMethod,
+ size_t uDeadMagic)
+{
+ PRTMEMTRACKERHDR pHdr = (PRTMEMTRACKERHDR)pvUser - 1;
+ AssertReturn(pHdr->uMagic == RTMEMTRACKERHDR_MAGIC, NULL);
+ Assert(pHdr->cbUser == cbUser || !cbUser); NOREF(cbUser);
+ Assert(pHdr->pvUser == pvUser);
+
+ AssertReturn(enmMethod > RTMEMTRACKERMETHOD_INVALID && enmMethod < RTMEMTRACKERMETHOD_END, NULL);
+
+ /*
+ * First mark it as free.
+ */
+ pHdr->uMagic = uDeadMagic;
+
+ /*
+     * If there is an association with a user, we need to unlink it and update
+ * the statistics.
+ *
+ * A note on the locking here. We don't take the crossroads semaphore when
+ * reentering the memory tracker on the same thread because we may be
+ * holding it in a different direction and would therefore deadlock.
+ */
+ PRTMEMTRACKERUSER pMemUser = pHdr->pUser;
+ if (pMemUser)
+ {
+ Assert(pMemUser->pTracker == pTracker); Assert(pTracker);
+ PRTMEMTRACKERUSER pCallingUser = rtMemTrackerGetUser(pTracker);
+ bool const fTakeXRoadsLock = pCallingUser->cInTracker <= 1;
+ if (fTakeXRoadsLock)
+ RTSemXRoadsNSEnter(pTracker->hXRoads);
+
+ RTCritSectEnter(&pMemUser->CritSect);
+ RTListNodeRemove(&pHdr->ListEntry);
+ RTCritSectLeave(&pMemUser->CritSect);
+
+ if (pCallingUser == pMemUser)
+ rtMemTrackerStateRecordFree(&pCallingUser->Stats, pHdr->cbUser, enmMethod);
+ else
+ {
+ ASMAtomicIncU64(&pCallingUser->Stats.cUserChanges);
+ ASMAtomicIncU64(&pCallingUser->Stats.acMethodCalls[enmMethod]);
+
+ ASMAtomicSubU64(&pMemUser->Stats.cbTotalAllocated, cbUser);
+ ASMAtomicSubZ(&pMemUser->Stats.cbAllocated, cbUser);
+ }
+
+ rtMemTrackerStateRecordFree(&pTracker->GlobalStats, pHdr->cbUser, enmMethod);
+
+ /** @todo we're currently ignoring pszTag, consider how to correctly
+ * attribute the free operation if the tags differ - if it
+ * makes sense at all... */
+ NOREF(pszTag);
+ if (pHdr->pTag)
+ rtMemTrackerStateRecordFree(&pHdr->pTag->Stats, pHdr->cbUser, enmMethod);
+
+
+ if (fTakeXRoadsLock)
+ RTSemXRoadsNSLeave(pTracker->hXRoads);
+ rtMemTrackerPutUser(pCallingUser);
+ }
+ else
+ {
+ /*
+ * No tracked. This may happen even when pTracker != NULL when the same
+ * thread reenters the tracker when allocating tracker structures or memory
+ * in some subroutine like threading and locking.
+ */
+ Assert(!pHdr->pTag);
+ if (pTracker)
+ ASMAtomicIncU64(&pTracker->cBusyFrees);
+ }
+
+    NOREF(pvCaller); /* We may later use this for use-after-free tracking. */
+ return pHdr;
+}
+
+
+/**
+ * Internal worker for RTMemTrackerHdrReallocPrep and
+ * RTMemTrackerHdrReallocPrepEx.
+ *
+ * @returns Pointer to the actual allocation.
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pvOldUser The user memory.
+ * @param cbOldUser The size of the user memory, 0 if unknown.
+ * @param pszTag The tag string.
+ * @param pvCaller The return address.
+ */
+static void *rtMemTrackerHdrReallocPrepEx(PRTMEMTRACKERINT pTracker, void *pvOldUser, size_t cbOldUser,
+ const char *pszTag, void *pvCaller)
+{
+ if (!pvOldUser)
+ return NULL;
+ return rtMemTrackerHdrFreeCommon(pTracker, pvOldUser, cbOldUser, pszTag, pvCaller,
+ RTMEMTRACKERMETHOD_REALLOC_PREP, RTMEMTRACKERHDR_MAGIC_REALLOC);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerHdrReallocDone and
+ * RTMemTrackerHdrReallocDoneEx.
+ *
+ * @returns Pointer to the actual allocation.
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pvNew The new memory chunk. Can be NULL.
+ * @param cbNewUser The size of the new memory chunk.
+ * @param pvOldUser Pointer to the old user memory.
+ * @param pszTag The tag string.
+ * @param pvCaller The return address.
+ */
+static void *rtMemTrackerHdrReallocDoneEx(PRTMEMTRACKERINT pTracker, void *pvNew, size_t cbNewUser,
+ void *pvOldUser, const char *pszTag, void *pvCaller)
+{
+ /* Succeeded? */
+ if (pvNew)
+ return rtMemTrackerHdrAllocEx(pTracker, pvNew, cbNewUser, pszTag, pvCaller, RTMEMTRACKERMETHOD_REALLOC_DONE);
+
+ /* Failed or just realloc to zero? */
+ if (cbNewUser)
+ {
+ PRTMEMTRACKERHDR pHdr = (PRTMEMTRACKERHDR)pvOldUser - 1;
+ AssertReturn(pHdr->uMagic == RTMEMTRACKERHDR_MAGIC_REALLOC, NULL);
+
+ return rtMemTrackerHdrAllocEx(pTracker, pHdr, pHdr->cbUser, pszTag, pvCaller, RTMEMTRACKERMETHOD_REALLOC_FAILED);
+ }
+
+    /* Realloc to zero bytes, i.e. free. */
+ return NULL;
+}
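+
+/* Illustrative sketch (not the actual IPRT allocator glue): how an allocator
+ * endpoint is expected to drive the realloc prep/done pair declared in
+ * iprt/memtracker.h; rtExampleRealloc and the CRT realloc are assumptions
+ * for the example.
+ * @code
+ *     void *rtExampleRealloc(void *pvOldUser, size_t cbNewUser, const char *pszTag)
+ *     {
+ *         void *pvOld = RTMemTrackerHdrReallocPrep(pvOldUser, 0, pszTag, ASMReturnAddress());
+ *         void *pvNew = realloc(pvOld, cbNewUser + sizeof(RTMEMTRACKERHDR));
+ *         return RTMemTrackerHdrReallocDone(pvNew, cbNewUser, pvOldUser, pszTag, ASMReturnAddress());
+ *     }
+ * @endcode
+ */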
+
+
+/**
+ * Internal worker for RTMemTrackerHdrFree and RTMemTrackerHdrFreeEx.
+ *
+ * @returns Pointer to the actual allocation.
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pvUser The user memory.
+ * @param cbUser The size of the user memory, 0 if unknown.
+ * @param pszTag The tag string.
+ * @param pvCaller The return address.
+ * @param enmMethod The free method.
+ */
+static void *rtMemTrackerHdrFreeEx(PRTMEMTRACKERINT pTracker, void *pvUser, size_t cbUser,
+ const char *pszTag, void *pvCaller, RTMEMTRACKERMETHOD enmMethod)
+{
+ if (!pvUser)
+ return NULL;
+ return rtMemTrackerHdrFreeCommon(pTracker, pvUser, cbUser, pszTag, pvCaller, enmMethod, RTMEMTRACKERHDR_MAGIC_FREE);
+}
+
+
+/**
+ * Prints a statistics record.
+ *
+ * @param pStats The record.
+ * @param pOutput The output callback table.
+ * @param fVerbose Whether to print in terse or verbose form.
+ */
+DECLINLINE(void) rtMemTrackerDumpOneStatRecord(PRTMEMTRACKERSTATS pStats, PRTMEMTRACKEROUTPUT pOutput, bool fVerbose)
+{
+ if (fVerbose)
+ {
+ pOutput->pfnPrintf(pOutput,
+ " Currently allocated: %7zu blocks, %8zu bytes\n"
+ " Total allocation sum: %7RU64 blocks, %8RU64 bytes\n"
+ ,
+ pStats->cAllocatedBlocks,
+ pStats->cbAllocated,
+ pStats->cTotalAllocatedBlocks,
+ pStats->cbTotalAllocated);
+ pOutput->pfnPrintf(pOutput,
+ " Alloc: %7RU64 AllocZ: %7RU64 Free: %7RU64 User Chg: %7RU64\n"
+ " RPrep: %7RU64 RDone: %7RU64 RFail: %7RU64\n"
+ " New: %7RU64 New[]: %7RU64 Delete: %7RU64 Delete[]: %7RU64\n"
+ ,
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_ALLOC],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_ALLOCZ],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_FREE],
+ pStats->cUserChanges,
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_REALLOC_PREP],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_REALLOC_DONE],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_REALLOC_FAILED],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_NEW],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_NEW_ARRAY],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_DELETE],
+ pStats->acMethodCalls[RTMEMTRACKERMETHOD_DELETE_ARRAY]);
+ }
+ else
+ {
+ pOutput->pfnPrintf(pOutput, " %zu bytes in %zu blocks\n",
+ pStats->cbAllocated, pStats->cAllocatedBlocks);
+ }
+}
+
+
+/**
+ * Internal worker that dumps all the memory tracking data.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pOutput The output callback table.
+ */
+static void rtMemTrackerDumpAllWorker(PRTMEMTRACKERINT pTracker, PRTMEMTRACKEROUTPUT pOutput)
+{
+ if (!pTracker)
+ return;
+
+ /*
+ * We use the EW direction to make sure the lists, trees and statistics
+     * do not change while we're working.
+ */
+ PRTMEMTRACKERUSER pUser = rtMemTrackerGetUser(pTracker);
+ RTSemXRoadsEWEnter(pTracker->hXRoads);
+
+    /* Global statistics. */
+ pOutput->pfnPrintf(pOutput, "*** Global statistics ***\n");
+ rtMemTrackerDumpOneStatRecord(&pTracker->GlobalStats, pOutput, true);
+ pOutput->pfnPrintf(pOutput, " Busy Allocs: %4RU64 Busy Frees: %4RU64 Tags: %3u Users: %3u\n",
+ pTracker->cBusyAllocs, pTracker->cBusyFrees, pTracker->cTags, pTracker->cUsers);
+
+ /* Per tag statistics. */
+ pOutput->pfnPrintf(pOutput, "\n*** Tag statistics ***\n");
+ PRTMEMTRACKERTAG pTag, pNextTag;
+ RTListForEachSafe(&pTracker->TagList, pTag, pNextTag, RTMEMTRACKERTAG, ListEntry)
+ {
+ pOutput->pfnPrintf(pOutput, "Tag: %s\n", pTag->szTag);
+ rtMemTrackerDumpOneStatRecord(&pTag->Stats, pOutput, true);
+        pOutput->pfnPrintf(pOutput, "\n");
+ }
+
+ /* Per user statistics & blocks. */
+ pOutput->pfnPrintf(pOutput, "\n*** User statistics ***\n");
+ PRTMEMTRACKERUSER pCurUser, pNextUser;
+ RTListForEachSafe(&pTracker->UserList, pCurUser, pNextUser, RTMEMTRACKERUSER, ListEntry)
+ {
+ pOutput->pfnPrintf(pOutput, "User #%u: %s%s (cInTracker=%d)\n",
+ pCurUser->idUser,
+ pCurUser->szName,
+ pUser == pCurUser ? " (me)" : "",
+ pCurUser->cInTracker);
+ rtMemTrackerDumpOneStatRecord(&pCurUser->Stats, pOutput, true);
+
+ PRTMEMTRACKERHDR pCurHdr, pNextHdr;
+ RTListForEachSafe(&pCurUser->MemoryList, pCurHdr, pNextHdr, RTMEMTRACKERHDR, ListEntry)
+ {
+ if (pCurHdr->pTag)
+ pOutput->pfnPrintf(pOutput,
+ " %zu bytes at %p by %p with tag %s\n"
+ "%.*Rhxd\n"
+ "\n",
+ pCurHdr->cbUser, pCurHdr->pvUser, pCurHdr->pvCaller, pCurHdr->pTag->szTag,
+ RT_MIN(pCurHdr->cbUser, 16*3), pCurHdr->pvUser);
+ else
+ pOutput->pfnPrintf(pOutput,
+ " %zu bytes at %p by %p without a tag\n"
+ "%.*Rhxd\n"
+ "\n",
+ pCurHdr->cbUser, pCurHdr->pvUser, pCurHdr->pvCaller,
+ RT_MIN(pCurHdr->cbUser, 16*3), pCurHdr->pvUser);
+ }
+        pOutput->pfnPrintf(pOutput, "\n");
+ }
+
+ /* Repeat the global statistics. */
+ pOutput->pfnPrintf(pOutput, "*** Global statistics (reprise) ***\n");
+ rtMemTrackerDumpOneStatRecord(&pTracker->GlobalStats, pOutput, true);
+ pOutput->pfnPrintf(pOutput, " Busy Allocs: %4RU64 Busy Frees: %4RU64 Tags: %3u Users: %3u\n",
+ pTracker->cBusyAllocs, pTracker->cBusyFrees, pTracker->cTags, pTracker->cUsers);
+
+ RTSemXRoadsEWLeave(pTracker->hXRoads);
+ rtMemTrackerPutUser(pUser);
+}
+
+
+/**
+ * Internal worker that dumps the memory tracking statistics.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pOutput The output callback table.
+ * @param fVerbose Whether to be verbose or quiet.
+ */
+static void rtMemTrackerDumpStatsWorker(PRTMEMTRACKERINT pTracker, PRTMEMTRACKEROUTPUT pOutput, bool fVerbose)
+{
+ if (!pTracker)
+ return;
+
+ /*
+ * We use the EW direction to make sure the lists, trees and statistics
+     * do not change while we're working.
+ */
+ PRTMEMTRACKERUSER pUser = rtMemTrackerGetUser(pTracker);
+ RTSemXRoadsEWEnter(pTracker->hXRoads);
+
+    /* Global statistics. */
+ pOutput->pfnPrintf(pOutput, "*** Global statistics ***\n");
+ rtMemTrackerDumpOneStatRecord(&pTracker->GlobalStats, pOutput, fVerbose);
+ if (fVerbose)
+ pOutput->pfnPrintf(pOutput, " Busy Allocs: %4RU64 Busy Frees: %4RU64 Tags: %3u Users: %3u\n",
+ pTracker->cBusyAllocs, pTracker->cBusyFrees, pTracker->cTags, pTracker->cUsers);
+
+ /* Per tag statistics. */
+ pOutput->pfnPrintf(pOutput, "\n*** Tag statistics ***\n");
+ PRTMEMTRACKERTAG pTag, pNextTag;
+ RTListForEachSafe(&pTracker->TagList, pTag, pNextTag, RTMEMTRACKERTAG, ListEntry)
+ {
+ if ( fVerbose
+ || pTag->Stats.cbAllocated)
+ {
+ pOutput->pfnPrintf(pOutput, "Tag: %s\n", pTag->szTag);
+ rtMemTrackerDumpOneStatRecord(&pTag->Stats, pOutput, fVerbose);
+ if (fVerbose)
+                pOutput->pfnPrintf(pOutput, "\n");
+ }
+ }
+
+ /* Per user statistics. */
+ pOutput->pfnPrintf(pOutput, "\n*** User statistics ***\n");
+ PRTMEMTRACKERUSER pCurUser, pNextUser;
+ RTListForEachSafe(&pTracker->UserList, pCurUser, pNextUser, RTMEMTRACKERUSER, ListEntry)
+ {
+ if ( fVerbose
+ || pCurUser->Stats.cbAllocated
+ || pCurUser == pUser)
+ {
+ pOutput->pfnPrintf(pOutput, "User #%u: %s%s (cInTracker=%d)\n",
+ pCurUser->idUser,
+ pCurUser->szName,
+ pUser == pCurUser ? " (me)" : "",
+ pCurUser->cInTracker);
+ rtMemTrackerDumpOneStatRecord(&pCurUser->Stats, pOutput, fVerbose);
+ if (fVerbose)
+                pOutput->pfnPrintf(pOutput, "\n");
+ }
+ }
+
+ if (fVerbose)
+ {
+ /* Repeat the global statistics. */
+ pOutput->pfnPrintf(pOutput, "*** Global statistics (reprise) ***\n");
+ rtMemTrackerDumpOneStatRecord(&pTracker->GlobalStats, pOutput, fVerbose);
+ pOutput->pfnPrintf(pOutput, " Busy Allocs: %4RU64 Busy Frees: %4RU64 Tags: %3u Users: %3u\n",
+ pTracker->cBusyAllocs, pTracker->cBusyFrees, pTracker->cTags, pTracker->cUsers);
+ }
+
+ RTSemXRoadsEWLeave(pTracker->hXRoads);
+ rtMemTrackerPutUser(pUser);
+}
+
+
+/**
+ * @callback_method_impl{RTMEMTRACKEROUTPUT::pfnPrintf, Outputting to the debug log}
+ */
+static DECLCALLBACK(void) rtMemTrackerDumpLogOutput(PRTMEMTRACKEROUTPUT pThis, const char *pszFormat, ...)
+{
+ NOREF(pThis);
+ va_list va;
+ va_start(va, pszFormat);
+ RTLogPrintfV(pszFormat, va);
+ va_end(va);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpAllToLog and RTMemTrackerDumpAllToLogEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ */
+static void rtMemTrackerDumpAllToLogEx(PRTMEMTRACKERINT pTracker)
+{
+ RTMEMTRACKEROUTPUT Output;
+ Output.pfnPrintf = rtMemTrackerDumpLogOutput;
+ rtMemTrackerDumpAllWorker(pTracker, &Output);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpStatsToLog and
+ * RTMemTrackerDumpStatsToLogEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param fVerbose Whether to print all the stats or just the ones
+ * relevant to hunting leaks.
+ */
+static void rtMemTrackerDumpStatsToLogEx(PRTMEMTRACKERINT pTracker, bool fVerbose)
+{
+ RTMEMTRACKEROUTPUT Output;
+ Output.pfnPrintf = rtMemTrackerDumpLogOutput;
+ rtMemTrackerDumpStatsWorker(pTracker, &Output, fVerbose);
+}
+
+
+/**
+ * @callback_method_impl{RTMEMTRACKEROUTPUT::pfnPrintf, Outputting to the release log}
+ */
+static DECLCALLBACK(void) rtMemTrackerDumpLogRelOutput(PRTMEMTRACKEROUTPUT pThis, const char *pszFormat, ...)
+{
+ NOREF(pThis);
+ va_list va;
+ va_start(va, pszFormat);
+ RTLogRelPrintfV(pszFormat, va);
+ va_end(va);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpAllToLogRel and
+ * RTMemTrackerDumpAllToLogRelEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ */
+static void rtMemTrackerDumpAllToLogRelEx(PRTMEMTRACKERINT pTracker)
+{
+ RTMEMTRACKEROUTPUT Output;
+ Output.pfnPrintf = rtMemTrackerDumpLogRelOutput;
+ rtMemTrackerDumpAllWorker(pTracker, &Output);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpStatsToLogRel and
+ * RTMemTrackerDumpStatsToLogRelEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param fVerbose Whether to print all the stats or just the ones
+ * relevant to hunting leaks.
+ */
+static void rtMemTrackerDumpStatsToLogRelEx(PRTMEMTRACKERINT pTracker, bool fVerbose)
+{
+ RTMEMTRACKEROUTPUT Output;
+ Output.pfnPrintf = rtMemTrackerDumpLogRelOutput;
+ rtMemTrackerDumpStatsWorker(pTracker, &Output, fVerbose);
+}
+
+#ifdef IN_RING3
+
+/**
+ * @callback_method_impl{RTMEMTRACKEROUTPUT::pfnPrintf, Outputting to file}
+ */
+static DECLCALLBACK(void) rtMemTrackerDumpFileOutput(PRTMEMTRACKEROUTPUT pThis, const char *pszFormat, ...)
+{
+ va_list va;
+ va_start(va, pszFormat);
+ char szOutput[_4K];
+ size_t cchOutput = RTStrPrintfV(szOutput, sizeof(szOutput), pszFormat, va);
+ va_end(va);
+ RTFileWrite(pThis->uData.hFile, szOutput, cchOutput, NULL);
+}
+
+
+/**
+ * Internal worker that dumps the memory tracking statistics to a file handle.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param fVerbose Whether to print all the stats or just the ones
+ * relevant to hunting leaks.
+ * @param hFile The file handle. Can be NIL_RTFILE.
+ */
+static void rtMemTrackerDumpStatsToFileHandle(PRTMEMTRACKERINT pTracker, bool fVerbose, RTFILE hFile)
+{
+ if (hFile == NIL_RTFILE)
+ return;
+ RTMEMTRACKEROUTPUT Output;
+ Output.pfnPrintf = rtMemTrackerDumpFileOutput;
+ Output.uData.hFile = hFile;
+ rtMemTrackerDumpStatsWorker(pTracker, &Output, fVerbose);
+}
+
+
+/**
+ * Internal worker that dumps all the memory tracking information to a file
+ * handle.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param hFile The file handle. Can be NIL_RTFILE.
+ */
+static void rtMemTrackerDumpAllToFileHandle(PRTMEMTRACKERINT pTracker, RTFILE hFile)
+{
+ if (hFile == NIL_RTFILE)
+ return;
+ RTMEMTRACKEROUTPUT Output;
+ Output.pfnPrintf = rtMemTrackerDumpFileOutput;
+ Output.uData.hFile = hFile;
+ rtMemTrackerDumpAllWorker(pTracker, &Output);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpStatsToStdOut and
+ * RTMemTrackerDumpStatsToStdOutEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param fVerbose Whether to print all the stats or just the ones
+ * relevant to hunting leaks.
+ */
+static void rtMemTrackerDumpStatsToStdOutEx(PRTMEMTRACKERINT pTracker, bool fVerbose)
+{
+ rtMemTrackerDumpStatsToFileHandle(pTracker, fVerbose, rtFileGetStandard(RTHANDLESTD_OUTPUT));
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpAllToStdOut and
+ * RTMemTrackerDumpAllToStdOutEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ */
+static void rtMemTrackerDumpAllToStdOutEx(PRTMEMTRACKERINT pTracker)
+{
+ rtMemTrackerDumpAllToFileHandle(pTracker, rtFileGetStandard(RTHANDLESTD_OUTPUT));
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpStatsToStdErr and
+ * RTMemTrackerDumpStatsToStdErrEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param fVerbose Whether to print all the stats or just the ones
+ * relevant to hunting leaks.
+ */
+static void rtMemTrackerDumpStatsToStdErrEx(PRTMEMTRACKERINT pTracker, bool fVerbose)
+{
+ rtMemTrackerDumpStatsToFileHandle(pTracker, fVerbose, rtFileGetStandard(RTHANDLESTD_ERROR));
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpAllToStdErr and
+ * RTMemTrackerDumpAllToStdErrEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ */
+static void rtMemTrackerDumpAllToStdErrEx(PRTMEMTRACKERINT pTracker)
+{
+ rtMemTrackerDumpAllToFileHandle(pTracker, rtFileGetStandard(RTHANDLESTD_ERROR));
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpStatsToFile and
+ * RTMemTrackerDumpStatsToFileEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param fVerbose Whether to print all the stats or just the ones
+ * relevant to hunting leaks.
+ * @param pszFilename The name of the output file.
+ */
+static void rtMemTrackerDumpStatsToFileEx(PRTMEMTRACKERINT pTracker, bool fVerbose, const char *pszFilename)
+{
+ if (!pTracker)
+ return;
+
+ /** @todo this is borked. */
+ RTFILE hFile;
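+    /* Write-only, create-or-replace, deny nothing; unix mode 0600 is encoded
+       into the flags via RTFILE_O_CREATE_MODE_SHIFT. */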
+ int rc = RTFileOpen(&hFile, pszFilename,
+ RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_NONE
+ | (0600 << RTFILE_O_CREATE_MODE_SHIFT));
+ if (RT_FAILURE(rc))
+ return;
+ rtMemTrackerDumpStatsToFileHandle(pTracker, fVerbose, hFile);
+ RTFileClose(hFile);
+}
+
+
+/**
+ * Internal worker for RTMemTrackerDumpAllToFile and
+ * RTMemTrackerDumpAllToFileEx.
+ *
+ * @param pTracker The tracker instance. Can be NULL.
+ * @param pszFilename The name of the output file.
+ */
+static void rtMemTrackerDumpAllToFileEx(PRTMEMTRACKERINT pTracker, const char *pszFilename)
+{
+ if (!pTracker)
+ return;
+
+ RTFILE hFile;
+ int rc = RTFileOpen(&hFile, pszFilename,
+ RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_NONE
+ | (0600 << RTFILE_O_CREATE_MODE_SHIFT));
+ if (RT_FAILURE(rc))
+ return;
+ rtMemTrackerDumpAllToFileHandle(pTracker, hFile);
+ RTFileClose(hFile);
+}
+
+#endif /* IN_RING3 */
+
+
+
+/*
+ *
+ *
+ * Default tracker.
+ * Default tracker.
+ * Default tracker.
+ * Default tracker.
+ * Default tracker.
+ *
+ *
+ */
+
+
+/**
+ * Handles the lazy initialization when g_pDefaultTracker is NULL.
+ *
+ * @returns The newly created default tracker or NULL.
+ */
+static PRTMEMTRACKERINT rtMemTrackerLazyInitDefaultTracker(void)
+{
+ /*
+     * Don't attempt to initialize before RTThread has been initialized.
+ */
+ if (!RTThreadIsInitialized())
+ return NULL;
+
+ /*
+     * Only one initialization at a time. For now we'll ASSUME that no two
+     * threads end up here at the same time, only the same thread reentering
+     * from the allocator while it is creating the tracker.
+ */
+ static volatile bool s_fInitialized = false;
+ if (ASMAtomicXchgBool(&s_fInitialized, true))
+ return g_pDefaultTracker;
+
+    PRTMEMTRACKERINT pTracker = NULL; /* gcc can't tell rtMemTrackerCreate always sets it on success. */
+ int rc = rtMemTrackerCreate(&pTracker);
+ if (RT_FAILURE(rc))
+ return NULL;
+
+ g_pDefaultTracker = pTracker;
+ return pTracker;
+}
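+
+
+/*
+ * Illustrative note (added here; not part of the original source): the
+ * reentrancy the function above tolerates looks roughly like this:
+ *
+ *      RTMemTrackerHdrAlloc
+ *       -> rtMemTrackerLazyInitDefaultTracker     (s_fInitialized goes true)
+ *           -> rtMemTrackerCreate
+ *               -> allocator
+ *                   -> RTMemTrackerHdrAlloc
+ *                       -> rtMemTrackerLazyInitDefaultTracker
+ *                          returns g_pDefaultTracker, which is still NULL at
+ *                          this point, so the inner allocation simply goes
+ *                          untracked (all workers accept a NULL tracker).
+ */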
+
+
+
+RTDECL(void *) RTMemTrackerHdrAlloc(void *pv, size_t cb, const char *pszTag, void *pvCaller, RTMEMTRACKERMETHOD enmMethod)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerHdrAllocEx(pTracker, pv, cb, pszTag, pvCaller, enmMethod);
+}
+
+
+RTDECL(void *) RTMemTrackerHdrReallocPrep(void *pvOldUser, size_t cbOldUser, const char *pszTag, void *pvCaller)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerHdrReallocPrepEx(pTracker, pvOldUser, cbOldUser, pszTag, pvCaller);
+}
+
+
+RTDECL(void *) RTMemTrackerHdrReallocDone(void *pvNew, size_t cbNewUser, void *pvOld, const char *pszTag, void *pvCaller)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerHdrReallocDoneEx(pTracker, pvNew, cbNewUser, pvOld, pszTag, pvCaller);
+}
+
+
+RTDECL(void *) RTMemTrackerHdrFree(void *pvUser, size_t cbUser, const char *pszTag, void *pvCaller, RTMEMTRACKERMETHOD enmMethod)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerHdrFreeEx(pTracker, pvUser, cbUser, pszTag, pvCaller, enmMethod);
+}
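+
+
+/*
+ * Pairing sketch (added here; not part of the original source).  An allocator
+ * front-end would typically drive the four header APIs above roughly as shown
+ * below.  RTMEMTRACKERHDR comes from iprt/memtracker.h; the
+ * RTMEMTRACKERMETHOD_ALLOC / RTMEMTRACKERMETHOD_FREE constants are assumed
+ * members of RTMEMTRACKERMETHOD, and a real front-end would call its
+ * underlying allocator rather than RTMemAlloc to avoid recursing into the
+ * tracker:
+ *
+ *      // Allocation: reserve room for the tracker header up front.
+ *      void *pvInner = RTMemAlloc(sizeof(RTMEMTRACKERHDR) + cbUser);
+ *      void *pvUser  = RTMemTrackerHdrAlloc(pvInner, cbUser, pszTag,
+ *                                           ASMReturnAddress(), RTMEMTRACKERMETHOD_ALLOC);
+ *
+ *      // Reallocation: unlink before, relink after the underlying realloc.
+ *      void *pvOldInner = RTMemTrackerHdrReallocPrep(pvUser, cbUser, pszTag, ASMReturnAddress());
+ *      void *pvNewInner = RTMemRealloc(pvOldInner, sizeof(RTMEMTRACKERHDR) + cbNewUser);
+ *      pvUser           = RTMemTrackerHdrReallocDone(pvNewInner, cbNewUser, pvUser,
+ *                                                    pszTag, ASMReturnAddress());
+ *
+ *      // Free: the tracker strips its header and returns the real block.
+ *      void *pvFree = RTMemTrackerHdrFree(pvUser, cbNewUser, pszTag,
+ *                                         ASMReturnAddress(), RTMEMTRACKERMETHOD_FREE);
+ *      RTMemFree(pvFree);
+ */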
+
+
+RTDECL(void) RTMemTrackerDumpAllToLog(void)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpAllToLogEx(pTracker);
+}
+
+
+RTDECL(void) RTMemTrackerDumpAllToLogRel(void)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpAllToLogRelEx(pTracker);
+}
+
+
+RTDECL(void) RTMemTrackerDumpAllToStdOut(void)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpAllToStdOutEx(pTracker);
+}
+
+
+RTDECL(void) RTMemTrackerDumpAllToStdErr(void)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpAllToStdErrEx(pTracker);
+}
+
+
+RTDECL(void) RTMemTrackerDumpAllToFile(const char *pszFilename)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpAllToFileEx(pTracker, pszFilename);
+}
+
+
+RTDECL(void) RTMemTrackerDumpStatsToLog(bool fVerbose)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpStatsToLogEx(pTracker, fVerbose);
+}
+
+
+RTDECL(void) RTMemTrackerDumpStatsToLogRel(bool fVerbose)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpStatsToLogRelEx(pTracker, fVerbose);
+}
+
+
+RTDECL(void) RTMemTrackerDumpStatsToStdOut(bool fVerbose)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpStatsToStdOutEx(pTracker, fVerbose);
+}
+
+
+RTDECL(void) RTMemTrackerDumpStatsToStdErr(bool fVerbose)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpStatsToStdErrEx(pTracker, fVerbose);
+}
+
+
+RTDECL(void) RTMemTrackerDumpStatsToFile(bool fVerbose, const char *pszFilename)
+{
+ PRTMEMTRACKERINT pTracker = g_pDefaultTracker;
+ if (RT_UNLIKELY(!pTracker))
+ pTracker = rtMemTrackerLazyInitDefaultTracker();
+ return rtMemTrackerDumpStatsToFileEx(pTracker, fVerbose, pszFilename);
+}
+
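+
+/*
+ * Usage sketch (added here; not part of the original source): a typical leak
+ * hunt runs the workload and then dumps via the default-tracker wrappers
+ * above at shutdown, e.g.:
+ *
+ *      RTMemTrackerDumpStatsToStdErr(false);       // fVerbose=false: leak-relevant stats only
+ *      RTMemTrackerDumpAllToFile("memtracker.dump");   // filename invented for illustration
+ */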