author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483  /js/src/ds
parent     Initial commit. (diff)
download   firefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
           firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/ds')
-rw-r--r--  js/src/ds/AvlTree.h            1041
-rw-r--r--  js/src/ds/BitArray.h            114
-rw-r--r--  js/src/ds/Bitmap.cpp            113
-rw-r--r--  js/src/ds/Bitmap.h              187
-rw-r--r--  js/src/ds/Fifo.h                187
-rw-r--r--  js/src/ds/FixedLengthVector.h   111
-rw-r--r--  js/src/ds/IdValuePair.h          35
-rw-r--r--  js/src/ds/InlineTable.h         644
-rw-r--r--  js/src/ds/LifoAlloc.cpp         425
-rw-r--r--  js/src/ds/LifoAlloc.h          1196
-rw-r--r--  js/src/ds/Nestable.h             63
-rw-r--r--  js/src/ds/OrderedHashTable.h   1100
-rw-r--r--  js/src/ds/PointerAndUint7.h     125
-rw-r--r--  js/src/ds/PriorityQueue.h       125
-rw-r--r--  js/src/ds/SinglyLinkedList.h    160
-rw-r--r--  js/src/ds/Sort.h                147
-rw-r--r--  js/src/ds/TraceableFifo.h        93
17 files changed, 5866 insertions, 0 deletions
diff --git a/js/src/ds/AvlTree.h b/js/src/ds/AvlTree.h
new file mode 100644
index 0000000000..eb31041c5c
--- /dev/null
+++ b/js/src/ds/AvlTree.h
@@ -0,0 +1,1041 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// The methods 'AvlTreeImpl::insert_worker' and 'AvlTreeImpl::delete_worker',
+// and all supporting methods reachable from them, are derived from a public
+// domain implementation by Georg Kraml. The public domain implementation in
+// C was translated into Rust and the Rust translation was later translated
+// into this C++ implementation.
+//
+// Unfortunately the relevant web site for the original C version is long
+// gone, and can only be found on the Wayback Machine:
+//
+// https://web.archive.org/web/20010419134337/
+// http://www.kraml.at/georg/avltree/index.html
+//
+// https://web.archive.org/web/20030926063347/
+// http://www.kraml.at/georg/avltree/avlmonolithic.c
+//
+// https://web.archive.org/web/20030401124003/http://www.kraml.at/src/howto/
+//
+// The intermediate Rust translation can be found at
+//
+// https://github.com/bytecodealliance/regalloc.rs/blob/main/lib/src/avl_tree.rs
+//
+// For relicensing clearances, see Mozilla bugs 1620332 and 1769261:
+//
+// https://bugzilla.mozilla.org/show_bug.cgi?id=1620332
+// https://bugzilla.mozilla.org/show_bug.cgi?id=1769261
+//
+// All other code in this file originates from Mozilla.
+
+#ifndef ds_AvlTree_h
+#define ds_AvlTree_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "ds/LifoAlloc.h"
+
+namespace js {
+
+////////////////////////////////////////////////////////////////////////
+// //
+// AvlTree implementation. For interface see `class AvlTree` below. //
+// //
+////////////////////////////////////////////////////////////////////////
+
+// An AVL tree class, with private allocator and node-recycling. `T` is the
+// class of elements in the tree. `C` must provide a method
+//
+// static int compare(const T&, const T&)
+//
+// to provide a total ordering on values `T` that are put into the tree,
+// returning -1 for less-than, 0 for equal, and 1 for greater-than.
+//
+// `C::compare` does not have to be a total ordering for *all* values of `T`,
+// but it must be so for the `T` values in the tree. Requests to insert
+// duplicate `T` values, as determined equal by `C::compare`, are valid but
+// will be ignored in this implementation class: the stored data is unchanged.
+// The interface class `AvlTree`, however, will MOZ_CRASH() on such requests.
+//
+// `T` values stored in the tree will not be explicitly freed or destroyed.
+//
+// For best cache-friendliness, try to put the fields of `T` that are read by
+// your compare function at the end of `T`. See comment on `struct Node`
+// below.
+//
+// Some operations require (internally) building a stack of tree nodes from
+// the root to some leaf. The maximum stack size, and hence the maximum tree
+// depth, is currently bounded at 48. The max depth of an AVL tree is roughly
+// 1.44 * log2(# nodes), so providing the tree-balancing machinery works
+// correctly, the max number of nodes is at least 2^(48 / 1.44), somewhat over
+// 2^33 (= 8 G). On a 32-bit target we'll run out of address space long
+// before reaching that. On a 64-bit target, the minimum imaginable
+// sizeof(Node) is 16 (for the two pointers), so a tree with 2^33 nodes would
+// occupy at least 2^37 bytes, viz, around 137GB. So this seems unlikely to
+// be a limitation.
+//
+// All stack-pushing operations are release-asserted to not overflow the stack.
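+//
+// As an illustrative sketch (the names `Span` and `SpanOrder` below are
+// placeholders, not part of this header), a comparator satisfying the
+// `C::compare` contract might look like:
+//
+//   struct Span {
+//     void* payload;   // not read by compare
+//     uint32_t start;  // read by compare -- kept at the end of Span, per the
+//                      // cache-friendliness advice above
+//   };
+//   struct SpanOrder {
+//     static int compare(const Span& a, const Span& b) {
+//       return a.start < b.start ? -1 : (a.start > b.start ? 1 : 0);
+//     }
+//   };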
+
+template <class T, class C>
+class AvlTreeImpl {
+ // This is the implementation of AVL trees. If you want to know how to use
+ // them in your code, don't read this; instead look below at the public
+ // interface, that is, `class AvlTree`.
+ //
+ // All of `AvlTreeImpl`, apart from the iterator code at the bottom, is
+ // protected. Public facilities are provided by child class `AvlTree`.
+ protected:
+ // Tree node tags.
+ enum class Tag {
+ Free, // Node not in use -- is on the freelist.
+ None, // Node in use. Neither subtree is deeper.
+ Left, // Node in use. The left subtree is deeper.
+ Right, // Node in use. The right subtree is deeper.
+
+ Count, // Not used as an actual tag - should remain last in this list
+ };
+
+ // Tree nodes. The tag is stored in the lower bits of the right Node pointer
+ // rather than as a separate field. For types T with alignment no more than
+ // twice the size of a pointer (i.e., most types), this reduces the size of Node
+ // and enables nodes to pack more tightly, reducing memory requirements and
+ // improving cache behavior. (See bug 1847616.)
+ struct Node {
+ T item;
+ Node* left;
+
+ // This is the mask to use to extract the tag from `rightAndTag`.
+ static constexpr uintptr_t kTagMask = 3;
+ static_assert(mozilla::IsPowerOfTwo(kTagMask + 1),
+ "kTagMask must only have a consecutive sequence of its "
+ "lowest bits set");
+ static_assert(
+ kTagMask >= static_cast<uintptr_t>(Tag::Count) - 1,
+ "kTagMask must be sufficient to cover largest value in 'Tag'");
+
+ private:
+ uintptr_t rightAndTag;
+
+ public:
+ explicit Node(const T& item)
+ : item(item),
+ left(nullptr),
+ rightAndTag(static_cast<uintptr_t>(Tag::None)) {}
+
+ [[nodiscard]] Node* getRight() const {
+ return reinterpret_cast<Node*>(rightAndTag & ~kTagMask);
+ }
+ [[nodiscard]] Tag getTag() const {
+ return static_cast<Tag>(rightAndTag & kTagMask);
+ }
+
+ void setRight(Node* right) {
+ rightAndTag =
+ reinterpret_cast<uintptr_t>(right) | static_cast<uintptr_t>(getTag());
+ }
+ void setTag(const Tag tag) {
+ rightAndTag = (rightAndTag & ~kTagMask) | static_cast<uintptr_t>(tag);
+ }
+ void setRightAndTag(Node* right, const Tag tag) {
+ const uintptr_t rightAsUint = reinterpret_cast<uintptr_t>(right);
+ rightAndTag = rightAsUint | static_cast<uintptr_t>(tag);
+ }
+ };
+
+ // Ensure that we can use the needed lower bits of a Node pointer to store the
+ // tag.
+ static_assert(alignof(Node) >= Node::kTagMask + 1);
+
+ // Once-per-tree components.
+ Node* root_;
+ Node* freeList_;
+ LifoAlloc* alloc_;
+
+ // As a modest but easy optimisation, ::allocateNode will allocate one node
+ // at the first call that sees an empty `freeList_`, two on the next such
+ // call and four on subsequent such calls. This has the effect of reducing
+ // the number of calls to the underlying allocator `alloc_` by a factor of 4
+ // for all but the smallest trees. It also helps pack more nodes into each
+ // cache line. The limit of 4 exists for three reasons:
+ //
+ // (1) It gains the majority (75%) of the available benefit from reducing
+ // the number of calls to `alloc_`, as the allocation size tends to
+ // infinity.
+ //
+ // (2) Similarly, 4 `struct Node`s will surely be greater than 128 bytes,
+ // hence there is minimal chance to use even fewer cache lines by increasing
+ // the group size further. In any case most machines have cache lines of
+ // size 64 bytes, not 128.
+ //
+ // (3) Most importantly, it limits the maximum potentially wasted space,
+ // which is the case where a request causes an allocation of N nodes, of
+ // which one is used immediately and the N-1 are put on the freelist, but
+ // then -- because the tree never grows larger -- are never used. Given
+ // that N=4 here, the worst case lossage is 3 nodes, which seems tolerable.
+ uint32_t nextAllocSize_; // 1, 2 or 4 only
+
+ // The expected maximum tree depth. See comments above.
+ static const size_t MAX_TREE_DEPTH = 48;
+
+ AvlTreeImpl(const AvlTreeImpl&) = delete;
+ AvlTreeImpl& operator=(const AvlTreeImpl&) = delete;
+
+ // ---- Preliminaries --------------------------------------- //
+
+ explicit AvlTreeImpl(LifoAlloc* alloc = nullptr)
+ : root_(nullptr), freeList_(nullptr), alloc_(alloc), nextAllocSize_(1) {}
+
+ void setAllocator(LifoAlloc* alloc) { alloc_ = alloc; }
+
+ // Put `node` onto the free list, for possible later reuse.
+ inline void addToFreeList(Node* node) {
+ node->left = freeList_;
+ node->setRightAndTag(nullptr, Tag::Free); // for safety
+ freeList_ = node;
+ }
+
+ // A safer version of `addToFreeList`.
+ inline void freeNode(Node* node) {
+ MOZ_ASSERT(node->getTag() != Tag::Free);
+ addToFreeList(node);
+ }
+
+ // This is the slow path for ::allocateNode below. Allocate 1, 2 or 4 nodes
+ // as a block, return the first one properly initialised, and put the rest
+ // on the freelist, in increasing order of address.
+ MOZ_NEVER_INLINE Node* allocateNodeOOL(const T& v) {
+ switch (nextAllocSize_) {
+ case 1: {
+ nextAllocSize_ = 2;
+ Node* node = alloc_->new_<Node>(v);
+ // `node` is either fully initialized, or nullptr on OOM.
+ return node;
+ }
+ case 2: {
+ nextAllocSize_ = 4;
+ Node* nodes = alloc_->newArrayUninitialized<Node>(2);
+ if (!nodes) {
+ return nullptr;
+ }
+ Node* node0 = &nodes[0];
+ addToFreeList(&nodes[1]);
+ new (node0) Node(v);
+ return node0;
+ }
+ case 4: {
+ Node* nodes = alloc_->newArrayUninitialized<Node>(4);
+ if (!nodes) {
+ return nullptr;
+ }
+ Node* node0 = &nodes[0];
+ addToFreeList(&nodes[3]);
+ addToFreeList(&nodes[2]);
+ addToFreeList(&nodes[1]);
+ new (node0) Node(v);
+ return node0;
+ }
+ default: {
+ MOZ_CRASH();
+ }
+ }
+ }
+
+ // Allocate a Node holding `v`, or return nullptr on OOM. All of the fields
+ // are initialized.
+ inline Node* allocateNode(const T& v) {
+ Node* node = freeList_;
+ if (MOZ_LIKELY(node)) {
+ MOZ_ASSERT(node->getTag() == Tag::Free);
+ freeList_ = node->left;
+ new (node) Node(v);
+ return node;
+ }
+ return allocateNodeOOL(v);
+ }
+
+ // These exist only transiently, to aid rebalancing. They indicate whether
+ // an insertion/deletion succeeded, whether subsequent rebalancing is
+ // needed.
+ enum class Result { Error, OK, Balance };
+
+ using NodeAndResult = std::pair<Node*, Result>;
+
+ // Standard AVL single-rotate-left
+ Node* rotate_left(Node* old_root) {
+ Node* new_root = old_root->getRight();
+ old_root->setRight(new_root->left);
+ new_root->left = old_root;
+ return new_root;
+ }
+
+ // Standard AVL single-rotate-right
+ Node* rotate_right(Node* old_root) {
+ Node* new_root = old_root->left;
+ old_root->left = new_root->getRight();
+ new_root->setRight(old_root);
+ return new_root;
+ }
+
+ // ---- Helpers for insertion ------------------------------- //
+
+ // `leftgrown`: a helper function for `insert_worker`
+ //
+ // Parameters:
+ //
+ // root Root of a tree. This node's left subtree has just grown due to
+ // item insertion; its "tag" flag needs adjustment, and the local
+ // tree (the subtree of which this node is the root node) may have
+ // become unbalanced.
+ //
+ // Return values:
+ //
+ // The new root of the subtree, plus either:
+ //
+ // OK The local tree could be rebalanced or was balanced from the
+ // start. The caller, insert_worker, may assume the entire tree
+ // is valid.
+ // or
+ // Balance The local tree was balanced, but has grown in height.
+ // Do not assume the entire tree is valid.
+ //
+ // This function has been split into two pieces: `leftgrown`, which is small
+ // and hot, and is marked always-inline, and `leftgrown_left`, which handles
+ // a more complex and less frequent case, and is marked never-inline. The
+ // intent is to have the common case always inlined without having to deal
+ // with the extra register pressure from inlining the less frequent code.
+ // The dual function `rightgrown` is split similarly.
+
+ MOZ_NEVER_INLINE Node* leftgrown_left(Node* root) {
+ if (root->left->getTag() == Tag::Left) {
+ root->setTag(Tag::None);
+ root->left->setTag(Tag::None);
+ root = rotate_right(root);
+ } else {
+ switch (root->left->getRight()->getTag()) {
+ case Tag::Left:
+ root->setTag(Tag::Right);
+ root->left->setTag(Tag::None);
+ break;
+ case Tag::Right:
+ root->setTag(Tag::None);
+ root->left->setTag(Tag::Left);
+ break;
+ case Tag::None:
+ root->setTag(Tag::None);
+ root->left->setTag(Tag::None);
+ break;
+ case Tag::Free:
+ default:
+ MOZ_CRASH();
+ }
+ root->left->getRight()->setTag(Tag::None);
+ root->left = rotate_left(root->left);
+ root = rotate_right(root);
+ }
+ return root;
+ }
+
+ inline NodeAndResult leftgrown(Node* root) {
+ switch (root->getTag()) {
+ case Tag::Left:
+ return NodeAndResult(leftgrown_left(root), Result::OK);
+ case Tag::Right:
+ root->setTag(Tag::None);
+ return NodeAndResult(root, Result::OK);
+ case Tag::None:
+ root->setTag(Tag::Left);
+ return NodeAndResult(root, Result::Balance);
+ case Tag::Free:
+ default:
+ break;
+ }
+ MOZ_CRASH();
+ }
+
+ // `rightgrown`: a helper function for `insert_worker`. See `leftgrown` for
+ // details.
+
+ MOZ_NEVER_INLINE Node* rightgrown_right(Node* root) {
+ if (root->getRight()->getTag() == Tag::Right) {
+ root->setTag(Tag::None);
+ root->getRight()->setTag(Tag::None);
+ root = rotate_left(root);
+ } else {
+ switch (root->getRight()->left->getTag()) {
+ case Tag::Right:
+ root->setTag(Tag::Left);
+ root->getRight()->setTag(Tag::None);
+ break;
+ case Tag::Left:
+ root->setTag(Tag::None);
+ root->getRight()->setTag(Tag::Right);
+ break;
+ case Tag::None:
+ root->setTag(Tag::None);
+ root->getRight()->setTag(Tag::None);
+ break;
+ case Tag::Free:
+ default:
+ MOZ_CRASH();
+ }
+ root->getRight()->left->setTag(Tag::None);
+ root->setRight(rotate_right(root->getRight()));
+ root = rotate_left(root);
+ }
+ return root;
+ }
+
+ inline NodeAndResult rightgrown(Node* root) {
+ switch (root->getTag()) {
+ case Tag::Left:
+ root->setTag(Tag::None);
+ return NodeAndResult(root, Result::OK);
+ case Tag::Right:
+ return NodeAndResult(rightgrown_right(root), Result::OK);
+ case Tag::None:
+ root->setTag(Tag::Right);
+ return NodeAndResult(root, Result::Balance);
+ case Tag::Free:
+ default:
+ break;
+ }
+ MOZ_CRASH();
+ }
+
+ // ---- Insertion ------------------------------------------- //
+
+ // Worker for insertion. Allocates a node for `v` and inserts it into the
+ // tree. Returns: nullptr for OOM; (Node*)1 if `v` is a duplicate (per
+ // `C::compare`), in which case the tree is unchanged; otherwise (successful
+ // insertion) the new root. In the latter case, the new `item` field is
+ // initialised from `v`.
+ Node* insert_worker(const T& v) {
+ // Insertion is a two pass process. In the first pass, we descend from
+ // the root, looking for the place in the tree where the new node will go,
+ // and at the same time storing the sequence of visited nodes in a stack.
+ // In the second phase we re-ascend the tree, as guided by the stack,
+ // rebalancing as we go.
+ //
+ // Note, we start from `root_`, but that isn't updated at the end. Instead
+ // the new value is returned to the caller, which has to do the update.
+
+ Node* stack[MAX_TREE_DEPTH];
+ size_t stackPtr = 0; // points to next available slot
+
+#define STACK_ENTRY_SET_IS_LEFT(_nodePtr) \
+ ((Node*)(uintptr_t(_nodePtr) | uintptr_t(1)))
+#define STACK_ENTRY_GET_IS_LEFT(_ent) ((bool)(uintptr_t(_ent) & uintptr_t(1)))
+#define STACK_ENTRY_GET_NODE(_ent) ((Node*)(uintptr_t(_ent) & ~uintptr_t(1)))
+
+ // In the first phase, walk down the tree to find the place where the new
+ // node should be inserted, recording our path in `stack`. This loop has
+ // a factor-of-2 unrolling (the loop body contains two logical iterations)
+ // in order to reduce the overall cost of the stack-overflow check at the
+ // bottom.
+ Node* node = root_;
+ while (true) {
+ // First logical iteration
+ if (!node) {
+ break;
+ }
+ int cmpRes1 = C::compare(v, node->item);
+ if (cmpRes1 < 0) {
+ stack[stackPtr++] = STACK_ENTRY_SET_IS_LEFT(node);
+ node = node->left;
+ } else if (cmpRes1 > 0) {
+ stack[stackPtr++] = node;
+ node = node->getRight();
+ } else {
+ // `v` is already in the tree. Inform the caller, and don't change
+ // the tree.
+ return (Node*)(uintptr_t(1));
+ }
+ // Second logical iteration
+ if (!node) {
+ break;
+ }
+ int cmpRes2 = C::compare(v, node->item);
+ if (cmpRes2 < 0) {
+ stack[stackPtr++] = STACK_ENTRY_SET_IS_LEFT(node);
+ node = node->left;
+ } else if (cmpRes2 > 0) {
+ stack[stackPtr++] = node;
+ node = node->getRight();
+ } else {
+ return (Node*)(uintptr_t(1));
+ }
+ // We're going around again. Ensure there are at least two available
+ // stack slots.
+ MOZ_RELEASE_ASSERT(stackPtr < MAX_TREE_DEPTH - 2);
+ }
+ MOZ_ASSERT(!node);
+
+ // Now allocate the new node.
+ Node* new_node = allocateNode(v);
+ if (!new_node) {
+ return nullptr; // OOM
+ }
+
+ // And unwind the stack, back to the root, rebalancing as we go. Once we
+ // get to a place where the new subtree doesn't need to be rebalanced, we can
+ // stop this upward scan, because no nodes above it will need to be
+ // rebalanced either.
+ Node* curr_node = new_node;
+ Result curr_node_action = Result::Balance;
+
+ while (stackPtr > 0) {
+ Node* parent_node_tagged = stack[--stackPtr];
+ Node* parent_node = STACK_ENTRY_GET_NODE(parent_node_tagged);
+ if (STACK_ENTRY_GET_IS_LEFT(parent_node_tagged)) {
+ parent_node->left = curr_node;
+ if (curr_node_action == Result::Balance) {
+ auto pair = leftgrown(parent_node);
+ curr_node = pair.first;
+ curr_node_action = pair.second;
+ } else {
+ curr_node = parent_node;
+ break;
+ }
+ } else {
+ parent_node->setRight(curr_node);
+ if (curr_node_action == Result::Balance) {
+ auto pair = rightgrown(parent_node);
+ curr_node = pair.first;
+ curr_node_action = pair.second;
+ } else {
+ curr_node = parent_node;
+ break;
+ }
+ }
+ }
+
+ if (stackPtr > 0) {
+ curr_node = STACK_ENTRY_GET_NODE(stack[0]);
+ }
+ MOZ_ASSERT(curr_node);
+
+#undef STACK_ENTRY_SET_IS_LEFT
+#undef STACK_ENTRY_GET_IS_LEFT
+#undef STACK_ENTRY_GET_NODE
+ return curr_node;
+ }
+
+ // ---- Helpers for deletion -------------------------------- //
+
+ // `leftshrunk`: a helper function for `delete_worker` and `findlowest`
+ //
+ // Parameters:
+ //
+ // n Pointer to a node. The node's left subtree has just shrunk due to
+ // item removal; its "skew" flag needs adjustment, and the local tree
+ // (the subtree of which this node is the root node) may have become
+ // unbalanced.
+ //
+ // Return values:
+ //
+ // The new root of the subtree, plus either:
+ //
+ // OK The parent activation of the delete activation that called
+ // this function may assume the entire tree is valid.
+ //
+ // Balance Do not assume the entire tree is valid.
+
+ NodeAndResult leftshrunk(Node* n) {
+ switch (n->getTag()) {
+ case Tag::Left: {
+ n->setTag(Tag::None);
+ return NodeAndResult(n, Result::Balance);
+ }
+ case Tag::Right: {
+ if (n->getRight()->getTag() == Tag::Right) {
+ n->setTag(Tag::None);
+ n->getRight()->setTag(Tag::None);
+ n = rotate_left(n);
+ return NodeAndResult(n, Result::Balance);
+ } else if (n->getRight()->getTag() == Tag::None) {
+ n->setTag(Tag::Right);
+ n->getRight()->setTag(Tag::Left);
+ n = rotate_left(n);
+ return NodeAndResult(n, Result::OK);
+ } else {
+ switch (n->getRight()->left->getTag()) {
+ case Tag::Left:
+ n->setTag(Tag::None);
+ n->getRight()->setTag(Tag::Right);
+ break;
+ case Tag::Right:
+ n->setTag(Tag::Left);
+ n->getRight()->setTag(Tag::None);
+ break;
+ case Tag::None:
+ n->setTag(Tag::None);
+ n->getRight()->setTag(Tag::None);
+ break;
+ case Tag::Free:
+ default:
+ MOZ_CRASH();
+ }
+ n->getRight()->left->setTag(Tag::None);
+ n->setRight(rotate_right(n->getRight()));
+ n = rotate_left(n);
+ return NodeAndResult(n, Result::Balance);
+ }
+ /*NOTREACHED*/ MOZ_CRASH();
+ }
+ case Tag::None: {
+ n->setTag(Tag::Right);
+ return NodeAndResult(n, Result::OK);
+ }
+ case Tag::Free:
+ default: {
+ MOZ_CRASH();
+ }
+ }
+ MOZ_CRASH();
+ }
+
+ // rightshrunk: a helper function for `delete_worker` and `findhighest`. See
+ // `leftshrunk` for details.
+
+ NodeAndResult rightshrunk(Node* n) {
+ switch (n->getTag()) {
+ case Tag::Right: {
+ n->setTag(Tag::None);
+ return NodeAndResult(n, Result::Balance);
+ }
+ case Tag::Left: {
+ if (n->left->getTag() == Tag::Left) {
+ n->setTag(Tag::None);
+ n->left->setTag(Tag::None);
+ n = rotate_right(n);
+ return NodeAndResult(n, Result::Balance);
+ } else if (n->left->getTag() == Tag::None) {
+ n->setTag(Tag::Left);
+ n->left->setTag(Tag::Right);
+ n = rotate_right(n);
+ return NodeAndResult(n, Result::OK);
+ } else {
+ switch (n->left->getRight()->getTag()) {
+ case Tag::Left:
+ n->setTag(Tag::Right);
+ n->left->setTag(Tag::None);
+ break;
+ case Tag::Right:
+ n->setTag(Tag::None);
+ n->left->setTag(Tag::Left);
+ break;
+ case Tag::None:
+ n->setTag(Tag::None);
+ n->left->setTag(Tag::None);
+ break;
+ case Tag::Free:
+ default:
+ MOZ_CRASH();
+ }
+ n->left->getRight()->setTag(Tag::None);
+ n->left = rotate_left(n->left);
+ n = rotate_right(n);
+ return NodeAndResult(n, Result::Balance);
+ }
+ /*NOTREACHED*/ MOZ_CRASH();
+ }
+ case Tag::None: {
+ n->setTag(Tag::Left);
+ return NodeAndResult(n, Result::OK);
+ }
+ case Tag::Free:
+ default: {
+ MOZ_CRASH();
+ }
+ }
+ MOZ_CRASH();
+ }
+
+ // `findhighest`: helper function for `delete_worker`. It replaces a node
+ // with a subtree's greatest (per C::compare) item.
+ //
+ // Parameters:
+ //
+ // target Pointer to node to be replaced.
+ //
+ // n Address of pointer to subtree.
+ //
+ // Return value:
+ //
+ // Nothing The target node could not be replaced because the subtree
+ // provided was empty.
+ //
+ // Some(Node*,Result) jseward: it's pretty unclear, but I *think* it
+ // is a pair that has the same meaning as the
+ // pair returned by `leftgrown`, as described above.
+
+ mozilla::Maybe<NodeAndResult> findhighest(Node* target, Node* n) {
+ if (n == nullptr) {
+ return mozilla::Nothing();
+ }
+ auto res = Result::Balance;
+ if (n->getRight() != nullptr) {
+ auto fhi = findhighest(target, n->getRight());
+ if (fhi.isSome()) {
+ n->setRight(fhi.value().first);
+ res = fhi.value().second;
+ if (res == Result::Balance) {
+ auto pair = rightshrunk(n);
+ n = pair.first;
+ res = pair.second;
+ }
+ return mozilla::Some(NodeAndResult(n, res));
+ } else {
+ return mozilla::Nothing();
+ }
+ }
+ target->item = n->item;
+ Node* tmp = n;
+ n = n->left;
+ freeNode(tmp);
+ return mozilla::Some(NodeAndResult(n, res));
+ }
+
+ // `findlowest`: helper function for `delete_worker`. It replaces a node
+ // with a subtree's smallest (per C::compare) item. See `findhighest` for
+ // details.
+
+ mozilla::Maybe<NodeAndResult> findlowest(Node* target, Node* n) {
+ if (n == nullptr) {
+ return mozilla::Nothing();
+ }
+ Result res = Result::Balance;
+ if (n->left != nullptr) {
+ auto flo = findlowest(target, n->left);
+ if (flo.isSome()) {
+ n->left = flo.value().first;
+ res = flo.value().second;
+ if (res == Result::Balance) {
+ auto pair = leftshrunk(n);
+ n = pair.first;
+ res = pair.second;
+ }
+ return mozilla::Some(NodeAndResult(n, res));
+ } else {
+ return mozilla::Nothing();
+ }
+ }
+ target->item = n->item;
+ Node* tmp = n;
+ n = n->getRight();
+ freeNode(tmp);
+ return mozilla::Some(NodeAndResult(n, res));
+ }
+
+ // ---- Deletion -------------------------------------------- //
+
+ // Deletes the node matching `item` from an arbitrary subtree rooted at
+ // `node`. Returns the root of the new subtree (if any), plus a `Result`
+ // indicating either that the tree containing `node` does or does not need
+ // rebalancing (::Balance, ::OK), or that the item was not found (::Error).
+
+ NodeAndResult delete_worker(Node* node, const T& item) {
+ Result tmp = Result::Balance;
+ if (node == nullptr) {
+ return NodeAndResult(node, Result::Error);
+ }
+
+ int cmp_res = C::compare(item, node->item);
+ if (cmp_res < 0) {
+ auto pair1 = delete_worker(node->left, item);
+ node->left = pair1.first;
+ tmp = pair1.second;
+ if (tmp == Result::Balance) {
+ auto pair2 = leftshrunk(node);
+ node = pair2.first;
+ tmp = pair2.second;
+ }
+ return NodeAndResult(node, tmp);
+ } else if (cmp_res > 0) {
+ auto pair1 = delete_worker(node->getRight(), item);
+ node->setRight(pair1.first);
+ tmp = pair1.second;
+ if (tmp == Result::Balance) {
+ auto pair2 = rightshrunk(node);
+ node = pair2.first;
+ tmp = pair2.second;
+ }
+ return NodeAndResult(node, tmp);
+ } else {
+ if (node->left != nullptr) {
+ auto fhi = findhighest(node, node->left);
+ if (fhi.isSome()) {
+ node->left = fhi.value().first;
+ tmp = fhi.value().second;
+ if (tmp == Result::Balance) {
+ auto pair = leftshrunk(node);
+ node = pair.first;
+ tmp = pair.second;
+ }
+ }
+ return NodeAndResult(node, tmp);
+ }
+ if (node->getRight() != nullptr) {
+ auto flo = findlowest(node, node->getRight());
+ if (flo.isSome()) {
+ node->setRight(flo.value().first);
+ tmp = flo.value().second;
+ if (tmp == Result::Balance) {
+ auto pair = rightshrunk(node);
+ node = pair.first;
+ tmp = pair.second;
+ }
+ }
+ return NodeAndResult(node, tmp);
+ }
+ freeNode(node);
+ return NodeAndResult(nullptr, Result::Balance);
+ }
+ }
+
+ // ---- Lookup ---------------------------------------------- //
+
+ // Find the node matching `v`, or return nullptr if not found.
+ Node* find_worker(const T& v) const {
+ Node* node = root_;
+ while (node) {
+ int cmpRes = C::compare(v, node->item);
+ if (cmpRes < 0) {
+ node = node->left;
+ } else if (cmpRes > 0) {
+ node = node->getRight();
+ } else {
+ return node;
+ }
+ }
+ return nullptr;
+ }
+
+ // ---- Iteration ------------------------------------------- //
+
+ public:
+ // This provides iteration forwards over the tree. You can either iterate
+ // over the whole tree or specify a start point. To iterate over the whole
+ // tree:
+ //
+ // AvlTree<MyT,MyC> tree;
+ // .. put stuff into `tree` ..
+ //
+ // AvlTree<MyT,MyC>::Iter iter(&tree);
+ // while (iter.hasMore()) {
+ // MyT item = iter.next();
+ // }
+ //
+ // Alternatively you can initialize the iterator with some value `startAt`,
+ // so that the first value you get is greater than or equal to `startAt`,
+ // per `MyC::compare`:
+ //
+ // AvlTree<MyT,MyC>::Iter iter(&tree, startAt);
+ //
+ // Starting the iterator at a particular value requires finding the value in
+ // the tree and recording the path to it. So it's nearly as cheap as a call
+ // to `AvlTree::contains` and you can use it as a plausible substitute for
+ // `::contains` if you want.
+ //
+ // Note that `class Iter` is quite large -- around 50 machine words -- so
+ // you might want to think twice before allocating instances on the heap.
+ class Iter {
+ const AvlTreeImpl<T, C>* tree_;
+ Node* stack_[MAX_TREE_DEPTH];
+ size_t stackPtr_;
+
+ // This sets up the iterator stack so that the first value it produces
+ // will be the smallest value that is greater than or equal to `v`. Note
+ // the structural similarity to ::find_worker above.
+ //
+ // The logic for pushing nodes on the stack looks strange at first. Once
+ // set up, the stack contains a root-to-some-node path, and the
+ // top-of-stack value is the next value the iterator will emit. If the
+ // stack becomes empty then the iteration is complete.
+ //
+ // It's not quite accurate to say that the stack contains a complete
+ // root-to-some-node path. Rather, the stack contains such a path, except
+ // it omits nodes at which the path goes to the right child. Eg:
+ //
+ // 5
+ // 3 8
+ // 1 4 7 9
+ //
+ // If the next item to be emitted is 4, then the stack will be [5, 4] and
+ // not [5, 3, 4], because at 3 we go right. This explains why the
+ // `cmpRes > 0` case in `setupIteratorStack` doesn't push an item on the
+ // stack. It also explains why the single-argument `Iter::Iter` below,
+ // which sets up for iteration starting at the lowest element, simply
+ // calls `visitLeftChildren` to do its work.
+ void setupIteratorStack(Node* node, const T& v) {
+ // Ensure stackPtr_ is cached in a register, since this function can be
+ // hot.
+ MOZ_ASSERT(stackPtr_ == 0);
+ size_t stackPtr = 0;
+ while (node) {
+ int cmpRes = C::compare(v, node->item);
+ if (cmpRes < 0) {
+ stack_[stackPtr++] = node;
+ MOZ_RELEASE_ASSERT(stackPtr < MAX_TREE_DEPTH);
+ node = node->left;
+ } else if (cmpRes > 0) {
+ node = node->getRight();
+ } else {
+ stack_[stackPtr++] = node;
+ MOZ_RELEASE_ASSERT(stackPtr < MAX_TREE_DEPTH);
+ break;
+ }
+ }
+ stackPtr_ = stackPtr;
+ }
+
+ void visitLeftChildren(Node* node) {
+ while (true) {
+ Node* left = node->left;
+ if (left == nullptr) {
+ break;
+ }
+ stack_[stackPtr_++] = left;
+ MOZ_RELEASE_ASSERT(stackPtr_ < MAX_TREE_DEPTH);
+ node = left;
+ }
+ }
+
+ public:
+ explicit Iter(const AvlTreeImpl<T, C>* tree) {
+ tree_ = tree;
+ stackPtr_ = 0;
+ if (tree->root_ != nullptr) {
+ stack_[stackPtr_++] = tree->root_;
+ MOZ_RELEASE_ASSERT(stackPtr_ < MAX_TREE_DEPTH);
+ visitLeftChildren(tree->root_);
+ }
+ }
+ Iter(const AvlTreeImpl<T, C>* tree, const T& startAt) {
+ tree_ = tree;
+ stackPtr_ = 0;
+ setupIteratorStack(tree_->root_, startAt);
+ }
+ bool hasMore() const { return stackPtr_ > 0; }
+ T next() {
+ MOZ_RELEASE_ASSERT(stackPtr_ > 0);
+ Node* ret = stack_[--stackPtr_];
+ Node* right = ret->getRight();
+ if (right != nullptr) {
+ stack_[stackPtr_++] = right;
+ MOZ_RELEASE_ASSERT(stackPtr_ < MAX_TREE_DEPTH);
+ visitLeftChildren(right);
+ }
+ return ret->item;
+ }
+ };
+};
+
+////////////////////////////////////////////////////////////////////////
+// //
+// AvlTree public interface, for SpiderMonkey. //
+// //
+////////////////////////////////////////////////////////////////////////
+
+// This public interface is fairly limited and restrictive. If you need to
+// add more functionality, consider copying code from `class AvlTreeTestIF` in
+// js/src/jsapi-tests/testAvlTree.cpp rather than rolling your own. See
+// comments there.
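+//
+// A minimal usage sketch, reusing the placeholder names `MyT`/`MyC` from the
+// iterator documentation above. `lifo` and `someValue` are assumed to be a
+// live LifoAlloc and a MyT value, respectively:
+//
+//   AvlTree<MyT, MyC> tree(&lifo);
+//   if (!tree.insert(someValue)) {
+//     return false;                  // OOM
+//   }
+//   MyT copy;
+//   if (tree.contains(someValue, &copy)) {
+//     // `copy` holds the stored value that `MyC::compare` deems equal.
+//   }
+//   tree.remove(someValue);          // must be present, else MOZ_CRASH()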
+
+template <class T, class C>
+class AvlTree : public AvlTreeImpl<T, C> {
+ // Shorthands for names in the implementation (parent) class.
+ using Impl = AvlTreeImpl<T, C>;
+ using ImplNode = typename AvlTreeImpl<T, C>::Node;
+ using ImplResult = typename AvlTreeImpl<T, C>::Result;
+ using ImplNodeAndResult = typename AvlTreeImpl<T, C>::NodeAndResult;
+
+ public:
+ explicit AvlTree(LifoAlloc* alloc = nullptr) : Impl(alloc) {}
+
+ // You'll need to tell the tree how to allocate nodes, either here or in
+ // `AvlTree::AvlTree`.
+ void setAllocator(LifoAlloc* alloc) { Impl::setAllocator(alloc); }
+
+ // Is the tree empty?
+ bool empty() const { return Impl::root_ == nullptr; }
+
+ // Insert `v` in the tree. Returns false to indicate OOM. `v` may not be
+ // equal to any existing value in the tree, per `C::compare`; if it is, this
+ // routine will MOZ_CRASH(). It would be trivial to change this to replace
+ // an existing value instead, if needed.
+ [[nodiscard]] bool insert(const T& v) {
+ ImplNode* new_root = Impl::insert_worker(v);
+ // Take out both unlikely cases with a single comparison.
+ if (MOZ_UNLIKELY(uintptr_t(new_root) <= uintptr_t(1))) {
+ // OOM (new_root == 0) or duplicate (new_root == 1)
+ if (!new_root) {
+ // OOM
+ return false;
+ }
+ // Duplicate; tree is unchanged.
+ MOZ_CRASH();
+ }
+ Impl::root_ = new_root;
+ return true;
+ }
+
+ // Remove `v` from the tree. `v` must actually be in the tree, per
+ // `C::compare`. If it is not, this routine will MOZ_CRASH().
+ // Superficially it looks like we could change it to return without doing
+ // anything in that case, if needed, except we'd need to first verify that
+ // `delete_worker` doesn't change the tree in that case.
+ void remove(const T& v) {
+ ImplNodeAndResult pair = Impl::delete_worker(Impl::root_, v);
+ ImplNode* new_root = pair.first;
+ ImplResult res = pair.second;
+ if (MOZ_UNLIKELY(res == ImplResult::Error)) {
+ // `v` isn't in the tree.
+ MOZ_CRASH();
+ } else {
+ Impl::root_ = new_root;
+ }
+ }
+
+ // Determine whether the tree contains `v` and if so return, in `res`, a
+ // copy of the stored version. Note that the determination is done using
+ // `C::compare` and you should consider carefully the consequences of
+ // passing in `v` for which `C::compare` indicates "equal" for more than one
+ // value in the tree. This is not invalid, but it does mean that you may be
+ // returned, via `res`, *any* of the values in the tree that `compare` deems
+ // equal to `v`, and which you get is arbitrary -- it depends on which is
+ // closest to the root.
+ bool contains(const T& v, T* res) const {
+ ImplNode* node = Impl::find_worker(v);
+ if (node) {
+ *res = node->item;
+ return true;
+ }
+ return false;
+ }
+
+ // Determine whether the tree contains `v` and if so return the address of
+ // the stored version. The comments on `::contains` about the meaning of
+ // `C::compare` apply here too.
+ T* maybeLookup(const T& v) {
+ ImplNode* node = Impl::find_worker(v);
+ if (node) {
+ return &(node->item);
+ }
+ return nullptr;
+ }
+
+ // AvlTree::Iter is also public; it's just pass-through from AvlTreeImpl.
+ // See documentation above on AvlTree::Iter on how to use it.
+};
+
+} /* namespace js */
+
+#endif /* ds_AvlTree_h */
diff --git a/js/src/ds/BitArray.h b/js/src/ds/BitArray.h
new file mode 100644
index 0000000000..bdd78873fd
--- /dev/null
+++ b/js/src/ds/BitArray.h
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_BitArray_h
+#define ds_BitArray_h
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/TemplateLib.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include "jstypes.h"
+
+namespace js {
+
+namespace detail {
+
+template <typename WordT>
+inline uint_fast8_t CountTrailingZeroes(WordT word);
+
+template <>
+inline uint_fast8_t CountTrailingZeroes(uint32_t word) {
+ return mozilla::CountTrailingZeroes32(word);
+}
+
+template <>
+inline uint_fast8_t CountTrailingZeroes(uint64_t word) {
+ return mozilla::CountTrailingZeroes64(word);
+}
+
+} // namespace detail
+
+template <size_t nbits>
+class BitArray {
+ public:
+ // Use a 32 bit word to make it easier to access a BitArray from JIT code.
+ using WordT = uint32_t;
+
+ static const size_t bitsPerElement = sizeof(WordT) * CHAR_BIT;
+ static const size_t numSlots =
+ nbits / bitsPerElement + (nbits % bitsPerElement == 0 ? 0 : 1);
+
+ private:
+ static const size_t paddingBits = (numSlots * bitsPerElement) - nbits;
+ static_assert(paddingBits < bitsPerElement,
+ "More padding bits than expected.");
+ static const WordT paddingMask = WordT(-1) >> paddingBits;
+
+ WordT map[numSlots];
+
+ public:
+ constexpr BitArray() : map() {}
+
+ void clear(bool value) {
+ memset(map, value ? 0xFF : 0, sizeof(map));
+ if (value) {
+ map[numSlots - 1] &= paddingMask;
+ }
+ }
+
+ inline bool get(size_t offset) const {
+ size_t index;
+ WordT mask;
+ getIndexAndMask(offset, &index, &mask);
+ MOZ_ASSERT(index < nbits);
+ return map[index] & mask;
+ }
+
+ void set(size_t offset) {
+ size_t index;
+ WordT mask;
+ getIndexAndMask(offset, &index, &mask);
+ map[index] |= mask;
+ }
+
+ void unset(size_t offset) {
+ size_t index;
+ WordT mask;
+ getIndexAndMask(offset, &index, &mask);
+ map[index] &= ~mask;
+ }
+
+ bool isAllClear() const {
+ for (size_t i = 0; i < numSlots; i++) {
+ if (map[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // For iterating over the set bits in the bit array, get a word at a time
+ // (a usage sketch follows the class definition below).
+ WordT getWord(size_t elementIndex) const {
+ MOZ_ASSERT(elementIndex < nbits);
+ return map[elementIndex];
+ }
+
+ static void getIndexAndMask(size_t offset, size_t* indexp, WordT* maskp) {
+ MOZ_ASSERT(offset < nbits);
+ static_assert(bitsPerElement == 32, "unexpected bitsPerElement value");
+ *indexp = offset / bitsPerElement;
+ *maskp = WordT(1) << (offset % bitsPerElement);
+ }
+
+ static size_t offsetOfMap() { return offsetof(BitArray<nbits>, map); }
+};
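+
+// A usage sketch for iterating the set bits a word at a time, via getWord()
+// and detail::CountTrailingZeroes(). The callback name `visit` is a
+// placeholder:
+//
+//   BitArray<256> bits;
+//   bits.clear(false);
+//   bits.set(3);
+//   bits.set(70);
+//   for (size_t w = 0; w < BitArray<256>::numSlots; w++) {
+//     uint32_t word = bits.getWord(w);
+//     while (word) {
+//       uint_fast8_t b = detail::CountTrailingZeroes(word);
+//       visit(w * BitArray<256>::bitsPerElement + b);
+//       word &= word - 1;  // clear the lowest set bit
+//     }
+//   }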
+
+} /* namespace js */
+
+#endif /* ds_BitArray_h */
diff --git a/js/src/ds/Bitmap.cpp b/js/src/ds/Bitmap.cpp
new file mode 100644
index 0000000000..d96cf67c71
--- /dev/null
+++ b/js/src/ds/Bitmap.cpp
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ds/Bitmap.h"
+
+#include <algorithm>
+
+#include "js/UniquePtr.h"
+
+using namespace js;
+
+SparseBitmap::~SparseBitmap() {
+ for (Data::Range r(data.all()); !r.empty(); r.popFront()) {
+ js_delete(r.front().value());
+ }
+}
+
+size_t SparseBitmap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t size = data.shallowSizeOfExcludingThis(mallocSizeOf);
+ for (Data::Range r(data.all()); !r.empty(); r.popFront()) {
+ size += mallocSizeOf(r.front().value());
+ }
+ return size;
+}
+
+SparseBitmap::BitBlock* SparseBitmap::createBlock(Data::AddPtr p,
+ size_t blockId) {
+ MOZ_ASSERT(!p);
+ auto block = js::MakeUnique<BitBlock>();
+ if (!block || !data.add(p, blockId, block.get())) {
+ return nullptr;
+ }
+ std::fill(block->begin(), block->end(), 0);
+ return block.release();
+}
+
+SparseBitmap::BitBlock& SparseBitmap::createBlock(
+ Data::AddPtr p, size_t blockId, AutoEnterOOMUnsafeRegion& oomUnsafe) {
+ BitBlock* block = createBlock(p, blockId);
+ if (!block) {
+ oomUnsafe.crash("Bitmap OOM");
+ }
+ return *block;
+}
+
+bool SparseBitmap::getBit(size_t bit) const {
+ size_t word = bit / JS_BITS_PER_WORD;
+ size_t blockWord = blockStartWord(word);
+
+ const BitBlock* block = getBlock(blockWord / WordsInBlock);
+ if (block) {
+ return (*block)[word - blockWord] & bitMask(bit);
+ }
+ return false;
+}
+
+bool SparseBitmap::readonlyThreadsafeGetBit(size_t bit) const {
+ size_t word = bit / JS_BITS_PER_WORD;
+ size_t blockWord = blockStartWord(word);
+
+ const BitBlock* block = readonlyThreadsafeGetBlock(blockWord / WordsInBlock);
+ if (block) {
+ return (*block)[word - blockWord] & bitMask(bit);
+ }
+ return false;
+}
+
+void SparseBitmap::bitwiseAndWith(const DenseBitmap& other) {
+ for (Data::Enum e(data); !e.empty(); e.popFront()) {
+ BitBlock& block = *e.front().value();
+ size_t blockWord = e.front().key() * WordsInBlock;
+ bool anySet = false;
+ size_t numWords = wordIntersectCount(blockWord, other);
+ for (size_t i = 0; i < numWords; i++) {
+ block[i] &= other.word(blockWord + i);
+ anySet |= !!block[i];
+ }
+ if (!anySet) {
+ js_delete(&block);
+ e.removeFront();
+ }
+ }
+}
+
+void SparseBitmap::bitwiseOrWith(const SparseBitmap& other) {
+ for (Data::Range r(other.data.all()); !r.empty(); r.popFront()) {
+ const BitBlock& otherBlock = *r.front().value();
+ BitBlock& block = getOrCreateBlock(r.front().key());
+ for (size_t i = 0; i < WordsInBlock; i++) {
+ block[i] |= otherBlock[i];
+ }
+ }
+}
+
+void SparseBitmap::bitwiseOrInto(DenseBitmap& other) const {
+ for (Data::Range r(data.all()); !r.empty(); r.popFront()) {
+ BitBlock& block = *r.front().value();
+ size_t blockWord = r.front().key() * WordsInBlock;
+ size_t numWords = wordIntersectCount(blockWord, other);
+#ifdef DEBUG
+ // Any words out of range in other should be zero in this bitmap.
+ for (size_t i = numWords; i < WordsInBlock; i++) {
+ MOZ_ASSERT(!block[i]);
+ }
+#endif
+ for (size_t i = 0; i < numWords; i++) {
+ other.word(blockWord + i) |= block[i];
+ }
+ }
+}
diff --git a/js/src/ds/Bitmap.h b/js/src/ds/Bitmap.h
new file mode 100644
index 0000000000..6770585a61
--- /dev/null
+++ b/js/src/ds/Bitmap.h
@@ -0,0 +1,187 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_Bitmap_h
+#define ds_Bitmap_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryChecking.h"
+
+#include <algorithm>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+#include "js/Vector.h"
+
+// This file provides two classes for representing bitmaps.
+//
+// DenseBitmap is an array of words of bits, with size linear in the maximum
+// bit which has been set on it.
+//
+// SparseBitmap provides a reasonably simple, reasonably efficient (in time and
+// space) implementation of a sparse bitmap. The basic representation is a hash
+// table whose entries are fixed length malloc'ed blocks of bits.
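+//
+// A basic usage sketch for SparseBitmap (the bit indices are arbitrary):
+//
+//   SparseBitmap sparse;
+//   sparse.setBit(7);                        // infallible; crashes on OOM
+//   if (!sparse.setBitFallible(1 << 20)) {
+//     return false;                          // OOM
+//   }
+//   bool a = sparse.getBit(7);               // true
+//   bool b = sparse.getBit(12345);           // false -- no block was created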
+
+namespace js {
+
+class DenseBitmap {
+ using Data = Vector<uintptr_t, 0, SystemAllocPolicy>;
+
+ Data data;
+
+ public:
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return data.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ bool ensureSpace(size_t numWords) {
+ MOZ_ASSERT(data.empty());
+ return data.appendN(0, numWords);
+ }
+
+ size_t numWords() const { return data.length(); }
+ uintptr_t word(size_t i) const { return data[i]; }
+ uintptr_t& word(size_t i) { return data[i]; }
+
+ template <typename T>
+ typename std::enable_if_t<std::is_convertible_v<T, uintptr_t>, void>
+ copyBitsFrom(size_t wordStart, size_t numWords, T* source) {
+ MOZ_ASSERT(wordStart + numWords <= data.length());
+ for (size_t i = 0; i < numWords; i++) {
+ data[wordStart + i] = source[i];
+ }
+ }
+
+ template <typename T>
+ typename std::enable_if_t<std::is_convertible_v<T, uintptr_t>, void>
+ bitwiseOrRangeInto(size_t wordStart, size_t numWords, T* target) const {
+ for (size_t i = 0; i < numWords; i++) {
+ target[i] |= data[wordStart + i];
+ }
+ }
+};
+
+class SparseBitmap {
+ // The number of words of bits to use for each block mainly affects the
+ // memory usage of the bitmap. To minimize overhead, bitmaps which are
+ // expected to be fairly dense should have a large block size, and bitmaps
+ // which are expected to be very sparse should have a small block size.
+ static const size_t WordsInBlock = 4096 / sizeof(uintptr_t);
+
+ using BitBlock = mozilla::Array<uintptr_t, WordsInBlock>;
+ using Data =
+ HashMap<size_t, BitBlock*, DefaultHasher<size_t>, SystemAllocPolicy>;
+
+ Data data;
+
+ MOZ_ALWAYS_INLINE static size_t blockStartWord(size_t word) {
+ return word & ~(WordsInBlock - 1);
+ }
+
+ MOZ_ALWAYS_INLINE static uintptr_t bitMask(size_t bit) {
+ return uintptr_t(1) << (bit % JS_BITS_PER_WORD);
+ }
+
+ // Return the number of words in a BitBlock starting at |blockWord| which
+ // are in |other|.
+ static size_t wordIntersectCount(size_t blockWord, const DenseBitmap& other) {
+ long count = other.numWords() - blockWord;
+ return std::min<size_t>((size_t)WordsInBlock, std::max<long>(count, 0));
+ }
+
+ BitBlock& createBlock(Data::AddPtr p, size_t blockId,
+ AutoEnterOOMUnsafeRegion& oomUnsafe);
+
+ BitBlock* createBlock(Data::AddPtr p, size_t blockId);
+
+ MOZ_ALWAYS_INLINE BitBlock* getBlock(size_t blockId) const {
+ Data::Ptr p = data.lookup(blockId);
+ return p ? p->value() : nullptr;
+ }
+
+ MOZ_ALWAYS_INLINE const BitBlock* readonlyThreadsafeGetBlock(
+ size_t blockId) const {
+ Data::Ptr p = data.readonlyThreadsafeLookup(blockId);
+ return p ? p->value() : nullptr;
+ }
+
+ MOZ_ALWAYS_INLINE BitBlock& getOrCreateBlock(size_t blockId) {
+ // The lookupForAdd() needs protection against injected OOMs, as does
+ // the add() within createBlock().
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Data::AddPtr p = data.lookupForAdd(blockId);
+ if (p) {
+ return *p->value();
+ }
+ return createBlock(p, blockId, oomUnsafe);
+ }
+
+ MOZ_ALWAYS_INLINE BitBlock* getOrCreateBlockFallible(size_t blockId) {
+ Data::AddPtr p = data.lookupForAdd(blockId);
+ if (p) {
+ return p->value();
+ }
+ return createBlock(p, blockId);
+ }
+
+ public:
+ ~SparseBitmap();
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ MOZ_ALWAYS_INLINE void setBit(size_t bit) {
+ size_t word = bit / JS_BITS_PER_WORD;
+ size_t blockWord = blockStartWord(word);
+ BitBlock& block = getOrCreateBlock(blockWord / WordsInBlock);
+ block[word - blockWord] |= bitMask(bit);
+ }
+
+ MOZ_ALWAYS_INLINE bool setBitFallible(size_t bit) {
+ size_t word = bit / JS_BITS_PER_WORD;
+ size_t blockWord = blockStartWord(word);
+ BitBlock* block = getOrCreateBlockFallible(blockWord / WordsInBlock);
+ if (!block) {
+ return false;
+ }
+ (*block)[word - blockWord] |= bitMask(bit);
+ return true;
+ }
+
+ bool getBit(size_t bit) const;
+ bool readonlyThreadsafeGetBit(size_t bit) const;
+
+ void bitwiseAndWith(const DenseBitmap& other);
+ void bitwiseOrWith(const SparseBitmap& other);
+ void bitwiseOrInto(DenseBitmap& other) const;
+
+ // Currently, this API only supports a range of words that is in a single bit
+ // block.
+ template <typename T>
+ typename std::enable_if_t<std::is_convertible_v<T, uintptr_t>, void>
+ bitwiseOrRangeInto(size_t wordStart, size_t numWords, T* target) const {
+ size_t blockWord = blockStartWord(wordStart);
+
+ // We only support using a single bit block in this API.
+ MOZ_ASSERT(numWords &&
+ (blockWord == blockStartWord(wordStart + numWords - 1)));
+
+ BitBlock* block = getBlock(blockWord / WordsInBlock);
+ if (block) {
+ for (size_t i = 0; i < numWords; i++) {
+ target[i] |= (*block)[wordStart - blockWord + i];
+ }
+ }
+ }
+};
+
+} // namespace js
+
+#endif // ds_Bitmap_h
diff --git a/js/src/ds/Fifo.h b/js/src/ds/Fifo.h
new file mode 100644
index 0000000000..0237cc163b
--- /dev/null
+++ b/js/src/ds/Fifo.h
@@ -0,0 +1,187 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Fifo_h
+#define js_Fifo_h
+
+#include <algorithm>
+#include <utility>
+
+#include "js/Vector.h"
+
+namespace js {
+
+// A first-in-first-out queue container type. Fifo calls constructors and
+// destructors of all elements added, so non-PODs may be used safely. |Fifo|
+// stores the first |MinInlineCapacity| elements in-place before resorting to
+// dynamic allocation.
+//
+// T requirements:
+// - Either movable or copyable.
+// MinInlineCapacity requirements:
+// - Must be even.
+// AllocPolicy:
+// - see "Allocation policies" in AllocPolicy.h
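+//
+// A minimal usage sketch:
+//
+//   Fifo<uint32_t, 4, SystemAllocPolicy> queue;
+//   if (!queue.pushBack(1) || !queue.pushBack(2)) {
+//     return false;                         // OOM
+//   }
+//   while (!queue.empty()) {
+//     uint32_t v = queue.popCopyFront();    // yields 1, then 2 (FIFO order)
+//     // ... process v ...
+//   }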
+template <typename T, size_t MinInlineCapacity = 0,
+ class AllocPolicy = TempAllocPolicy>
+class Fifo {
+ static_assert(MinInlineCapacity % 2 == 0, "MinInlineCapacity must be even!");
+
+ protected:
+ // An element A is "younger" than an element B if B was inserted into the
+ // |Fifo| before A was.
+ //
+ // Invariant 1: Every element within |front_| is older than every element
+ // within |rear_|.
+ // Invariant 2: Entries within |front_| are sorted from younger to older.
+ // Invariant 3: Entries within |rear_| are sorted from older to younger.
+ // Invariant 4: If the |Fifo| is not empty, then |front_| is not empty.
+ Vector<T, MinInlineCapacity / 2, AllocPolicy> front_;
+ Vector<T, MinInlineCapacity / 2, AllocPolicy> rear_;
+
+ private:
+ // Maintain invariants after adding or removing entries.
+ void fixup() {
+ if (front_.empty() && !rear_.empty()) {
+ front_.swap(rear_);
+ std::reverse(front_.begin(), front_.end());
+ }
+ }
+
+ public:
+ explicit Fifo(AllocPolicy alloc = AllocPolicy())
+ : front_(alloc), rear_(alloc) {}
+
+ Fifo(Fifo&& rhs)
+ : front_(std::move(rhs.front_)), rear_(std::move(rhs.rear_)) {}
+
+ Fifo& operator=(Fifo&& rhs) {
+ MOZ_ASSERT(&rhs != this, "self-move disallowed");
+ this->~Fifo();
+ new (this) Fifo(std::move(rhs));
+ return *this;
+ }
+
+ Fifo(const Fifo&) = delete;
+ Fifo& operator=(const Fifo&) = delete;
+
+ size_t length() const {
+ MOZ_ASSERT_IF(rear_.length() > 0, front_.length() > 0); // Invariant 4.
+ return front_.length() + rear_.length();
+ }
+
+ bool empty() const {
+ MOZ_ASSERT_IF(rear_.length() > 0, front_.length() > 0); // Invariant 4.
+ return front_.empty();
+ }
+
+ // Iterator from oldest to youngest element.
+ struct ConstIterator {
+ const Fifo& self_;
+ size_t idx_;
+
+ ConstIterator(const Fifo& self, size_t idx) : self_(self), idx_(idx) {}
+
+ ConstIterator& operator++() {
+ ++idx_;
+ return *this;
+ }
+
+ const T& operator*() const {
+ // Iterate front in reverse, then rear.
+ size_t split = self_.front_.length();
+ return (idx_ < split) ? self_.front_[(split - 1) - idx_]
+ : self_.rear_[idx_ - split];
+ }
+
+ bool operator!=(const ConstIterator& other) const {
+ return (&self_ != &other.self_) || (idx_ != other.idx_);
+ }
+ };
+
+ ConstIterator begin() const { return ConstIterator(*this, 0); }
+
+ ConstIterator end() const { return ConstIterator(*this, length()); }
+
+ // Push an element to the back of the queue. This method can take either a
+ // |const T&| or a |T&&|.
+ template <typename U>
+ [[nodiscard]] bool pushBack(U&& u) {
+ if (!rear_.append(std::forward<U>(u))) {
+ return false;
+ }
+ fixup();
+ return true;
+ }
+
+ // Construct a T in-place at the back of the queue.
+ template <typename... Args>
+ [[nodiscard]] bool emplaceBack(Args&&... args) {
+ if (!rear_.emplaceBack(std::forward<Args>(args)...)) {
+ return false;
+ }
+ fixup();
+ return true;
+ }
+
+ // Access the element at the front of the queue.
+ T& front() {
+ MOZ_ASSERT(!empty());
+ return front_.back();
+ }
+ const T& front() const {
+ MOZ_ASSERT(!empty());
+ return front_.back();
+ }
+
+ // Remove the front element from the queue.
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ front_.popBack();
+ fixup();
+ }
+
+ // Convenience utility.
+ T popCopyFront() {
+ T ret = front();
+ popFront();
+ return ret;
+ }
+
+ // Clear all elements from the queue.
+ void clear() {
+ front_.clear();
+ rear_.clear();
+ }
+
+ // Clear all elements for which the given predicate returns 'true'. Return
+ // the number of elements removed.
+ template <class Pred>
+ size_t eraseIf(Pred pred) {
+ size_t frontLength = front_.length();
+ front_.eraseIf(pred);
+ size_t erased = frontLength - front_.length();
+
+ size_t rearLength = rear_.length();
+ rear_.eraseIf(pred);
+ erased += rearLength - rear_.length();
+
+ fixup();
+ return erased;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return front_.sizeOfExcludingThis(mallocSizeOf) +
+ rear_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+} // namespace js
+
+#endif /* js_Fifo_h */
diff --git a/js/src/ds/FixedLengthVector.h b/js/src/ds/FixedLengthVector.h
new file mode 100644
index 0000000000..64ae459ec4
--- /dev/null
+++ b/js/src/ds/FixedLengthVector.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_FixedLengthVector_h
+#define ds_FixedLengthVector_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/OperatorNewExtensions.h" // mozilla::KnownNotNull
+
+#include <stddef.h> // size_t
+
+#include "js/Utility.h" // js_free
+#include "vm/JSContext.h" // JSContext
+
+namespace js {
+
+// A dynamically-allocated fixed-length vector with bounds checking assertions.
+template <typename T>
+class FixedLengthVector {
+ // The pointer to the storage.
+ T* data_ = nullptr;
+
+ // The size of the storage.
+ size_t length_ = 0;
+
+ public:
+ FixedLengthVector() = default;
+
+ FixedLengthVector(FixedLengthVector&) = delete;
+ FixedLengthVector(FixedLengthVector&&) = default;
+
+ ~FixedLengthVector() {
+ if (initialized()) {
+ js_free(data_);
+ }
+ }
+
+ size_t length() const { return length_; }
+
+ bool initialized() const { return !!data_; }
+
+ // Allocate the storage with the given size, without calling constructors.
+ //
+ // If the allocation fails, this returns false and sets the
+ // pending exception on the given context.
+ [[nodiscard]] bool allocateUninitialized(JSContext* cx, size_t length) {
+ MOZ_ASSERT(!initialized());
+
+ length_ = length;
+ data_ = cx->pod_malloc<T>(length);
+ if (MOZ_UNLIKELY(!data_)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ // Allocate the storage with the given size and call default constructor.
+ //
+ // If the allocation fails, this returns false and sets the
+ // pending exception on the given context.
+ [[nodiscard]] bool allocate(JSContext* cx, size_t length) {
+ if (!allocateUninitialized(cx, length)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < length; i++) {
+ new (mozilla::KnownNotNull, &data_[i]) T();
+ }
+ return true;
+ }
+
+ T* begin() {
+ MOZ_ASSERT(initialized());
+ return data_;
+ }
+
+ const T* begin() const {
+ MOZ_ASSERT(initialized());
+ return data_;
+ }
+
+ T* end() {
+ MOZ_ASSERT(initialized());
+ return data_ + length_;
+ }
+
+ const T* end() const {
+ MOZ_ASSERT(initialized());
+ return data_ + length_;
+ }
+
+ T& operator[](size_t index) {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(index < length_);
+ return begin()[index];
+ }
+
+ const T& operator[](size_t index) const {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(index < length_);
+ return begin()[index];
+ }
+};
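+
+// A usage sketch, assuming a live JSContext* `cx` is in scope:
+//
+//   FixedLengthVector<double> values;
+//   if (!values.allocate(cx, 3)) {
+//     return false;  // OOM; the pending exception is already set on cx
+//   }
+//   for (size_t i = 0; i < values.length(); i++) {
+//     values[i] = double(i);
+//   }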
+
+} // namespace js
+
+#endif // ds_FixedLengthVector_h
diff --git a/js/src/ds/IdValuePair.h b/js/src/ds/IdValuePair.h
new file mode 100644
index 0000000000..523c574eb2
--- /dev/null
+++ b/js/src/ds/IdValuePair.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_IdValuePair_h
+#define ds_IdValuePair_h
+
+#include "gc/Tracer.h"
+#include "js/GCVector.h"
+#include "js/Id.h"
+#include "js/Value.h"
+
+namespace js {
+
+struct IdValuePair {
+ JS::Value value;
+ jsid id;
+
+ IdValuePair() : value(JS::UndefinedValue()), id(JS::PropertyKey::Void()) {}
+ explicit IdValuePair(jsid idArg) : value(JS::UndefinedValue()), id(idArg) {}
+ IdValuePair(jsid idArg, const Value& valueArg) : value(valueArg), id(idArg) {}
+
+ void trace(JSTracer* trc) {
+ TraceRoot(trc, &value, "IdValuePair::value");
+ TraceRoot(trc, &id, "IdValuePair::id");
+ }
+};
+
+using IdValueVector = JS::GCVector<IdValuePair, 8>;
+
+} /* namespace js */
+
+#endif /* ds_IdValuePair_h */
diff --git a/js/src/ds/InlineTable.h b/js/src/ds/InlineTable.h
new file mode 100644
index 0000000000..fe6fd22295
--- /dev/null
+++ b/js/src/ds/InlineTable.h
@@ -0,0 +1,644 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_InlineTable_h
+#define ds_InlineTable_h
+
+#include "mozilla/Maybe.h"
+
+#include <utility>
+
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+
+namespace js {
+
+namespace detail {
+
+// The InlineTable below needs an abstract way of testing keys for
+// tombstone values and of setting a key in an entry to a tombstone.
+// This is provided by the KeyPolicy generic type argument, which
+// has a default implementation for pointer types provided below.
+
+// A default implementation of a KeyPolicy for some types (only pointer
+// types for now).
+//
+// The `KeyPolicy` type parameter informs an InlineTable of how to
+// check for tombstone values and how to set a key within an entry
+// to a tombstone.
+//
+// A `KeyPolicy` for some key type `K` must provide two static methods:
+// static bool isTombstone(const K& key);
+// static void setToTombstone(K& key);
+template <typename K>
+class DefaultKeyPolicy;
+
+template <typename T>
+class DefaultKeyPolicy<T*> {
+ DefaultKeyPolicy() = delete;
+ DefaultKeyPolicy(const T*&) = delete;
+
+ public:
+ static bool isTombstone(T* const& ptr) { return ptr == nullptr; }
+ static void setToTombstone(T*& ptr) { ptr = nullptr; }
+};
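+
+// As an illustrative sketch (not part of this header), a KeyPolicy for a
+// hypothetical non-pointer key type that uses 0 as its tombstone value could
+// look like this:
+//
+//   struct UintKeyPolicy {
+//     static bool isTombstone(const uint32_t& key) { return key == 0; }
+//     static void setToTombstone(uint32_t& key) { key = 0; }
+//   };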
+
+template <typename InlineEntry, typename Entry, typename Table,
+ typename HashPolicy, typename AllocPolicy, typename KeyPolicy,
+ size_t InlineEntries>
+class InlineTable : private AllocPolicy {
+ private:
+ using TablePtr = typename Table::Ptr;
+ using TableAddPtr = typename Table::AddPtr;
+ using TableRange = typename Table::Range;
+ using Lookup = typename HashPolicy::Lookup;
+
+ size_t inlNext_;
+ size_t inlCount_;
+ InlineEntry inl_[InlineEntries];
+ Table table_;
+
+#ifdef DEBUG
+ template <typename Key>
+ static bool keyNonZero(const Key& key) {
+ // Zero as tombstone means zero keys are invalid.
+ return !!key;
+ }
+#endif
+
+ InlineEntry* inlineStart() {
+ MOZ_ASSERT(!usingTable());
+ return inl_;
+ }
+
+ const InlineEntry* inlineStart() const {
+ MOZ_ASSERT(!usingTable());
+ return inl_;
+ }
+
+ InlineEntry* inlineEnd() {
+ MOZ_ASSERT(!usingTable());
+ return inl_ + inlNext_;
+ }
+
+ const InlineEntry* inlineEnd() const {
+ MOZ_ASSERT(!usingTable());
+ return inl_ + inlNext_;
+ }
+
+ bool usingTable() const { return inlNext_ > InlineEntries; }
+
+ [[nodiscard]] bool switchToTable() {
+ MOZ_ASSERT(inlNext_ == InlineEntries);
+
+ table_.clearAndCompact();
+
+ InlineEntry* end = inlineEnd();
+ for (InlineEntry* it = inlineStart(); it != end; ++it) {
+ if (it->key && !it->moveTo(table_)) {
+ return false;
+ }
+ }
+
+ inlNext_ = InlineEntries + 1;
+ MOZ_ASSERT(table_.count() == inlCount_);
+ MOZ_ASSERT(usingTable());
+ return true;
+ }
+
+ [[nodiscard]] MOZ_NEVER_INLINE bool switchAndAdd(const InlineEntry& entry) {
+ if (!switchToTable()) {
+ return false;
+ }
+
+ return entry.putNew(table_);
+ }
+
+ public:
+ static const size_t SizeOfInlineEntries = sizeof(InlineEntry) * InlineEntries;
+
+ explicit InlineTable(AllocPolicy a = AllocPolicy())
+ : AllocPolicy(std::move(a)), inlNext_(0), inlCount_(0), table_(a) {}
+
+ class Ptr {
+ friend class InlineTable;
+
+ protected:
+ MOZ_INIT_OUTSIDE_CTOR Entry entry_;
+ MOZ_INIT_OUTSIDE_CTOR TablePtr tablePtr_;
+ MOZ_INIT_OUTSIDE_CTOR InlineEntry* inlPtr_;
+ MOZ_INIT_OUTSIDE_CTOR bool isInlinePtr_;
+
+ explicit Ptr(TablePtr p)
+ : entry_(p.found() ? &*p : nullptr),
+ tablePtr_(p),
+ isInlinePtr_(false) {}
+
+ explicit Ptr(InlineEntry* inlineEntry)
+ : entry_(inlineEntry), inlPtr_(inlineEntry), isInlinePtr_(true) {}
+
+ void operator==(const Ptr& other);
+
+ public:
+ // Leaves Ptr uninitialized.
+ Ptr() {
+#ifdef DEBUG
+ inlPtr_ = (InlineEntry*)0xbad;
+ isInlinePtr_ = true;
+#endif
+ }
+
+ // Default copy constructor works for this structure.
+
+ bool found() const {
+ return isInlinePtr_ ? bool(inlPtr_) : tablePtr_.found();
+ }
+
+ explicit operator bool() const { return found(); }
+
+ bool operator==(const Ptr& other) const {
+ MOZ_ASSERT(found() && other.found());
+ if (isInlinePtr_ != other.isInlinePtr_) {
+ return false;
+ }
+ if (isInlinePtr_) {
+ return inlPtr_ == other.inlPtr_;
+ }
+ return tablePtr_ == other.tablePtr_;
+ }
+
+ bool operator!=(const Ptr& other) const { return !(*this == other); }
+
+ Entry& operator*() {
+ MOZ_ASSERT(found());
+ return entry_;
+ }
+
+ Entry* operator->() {
+ MOZ_ASSERT(found());
+ return &entry_;
+ }
+ };
+
+ class AddPtr {
+ friend class InlineTable;
+
+ protected:
+ MOZ_INIT_OUTSIDE_CTOR Entry entry_;
+ MOZ_INIT_OUTSIDE_CTOR TableAddPtr tableAddPtr_;
+ MOZ_INIT_OUTSIDE_CTOR InlineEntry* inlAddPtr_;
+ MOZ_INIT_OUTSIDE_CTOR bool isInlinePtr_;
+ // Indicates whether inlAddPtr is a found result or an add pointer.
+ MOZ_INIT_OUTSIDE_CTOR bool inlPtrFound_;
+
+ AddPtr(InlineEntry* ptr, bool found)
+ : entry_(ptr),
+ inlAddPtr_(ptr),
+ isInlinePtr_(true),
+ inlPtrFound_(found) {}
+
+ explicit AddPtr(const TableAddPtr& p)
+ : entry_(p.found() ? &*p : nullptr),
+ tableAddPtr_(p),
+ isInlinePtr_(false) {}
+
+ public:
+ AddPtr() = default;
+
+ bool found() const {
+ return isInlinePtr_ ? inlPtrFound_ : tableAddPtr_.found();
+ }
+
+ explicit operator bool() const { return found(); }
+
+ bool operator==(const AddPtr& other) const {
+ MOZ_ASSERT(found() && other.found());
+ if (isInlinePtr_ != other.isInlinePtr_) {
+ return false;
+ }
+ if (isInlinePtr_) {
+ return inlAddPtr_ == other.inlAddPtr_;
+ }
+ return tableAddPtr_ == other.tableAddPtr_;
+ }
+
+ bool operator!=(const AddPtr& other) const { return !(*this == other); }
+
+ Entry& operator*() {
+ MOZ_ASSERT(found());
+ return entry_;
+ }
+
+ Entry* operator->() {
+ MOZ_ASSERT(found());
+ return &entry_;
+ }
+ };
+
+ size_t count() const { return usingTable() ? table_.count() : inlCount_; }
+
+ bool empty() const { return usingTable() ? table_.empty() : !inlCount_; }
+
+ void clear() {
+ inlNext_ = 0;
+ inlCount_ = 0;
+ }
+
+ MOZ_ALWAYS_INLINE
+ Ptr lookup(const Lookup& l) {
+ MOZ_ASSERT(keyNonZero(l));
+
+ if (usingTable()) {
+ return Ptr(table_.lookup(l));
+ }
+
+ InlineEntry* end = inlineEnd();
+ for (InlineEntry* it = inlineStart(); it != end; ++it) {
+ if (it->key && HashPolicy::match(it->key, l)) {
+ return Ptr(it);
+ }
+ }
+
+ return Ptr(nullptr);
+ }
+
+ MOZ_ALWAYS_INLINE
+ AddPtr lookupForAdd(const Lookup& l) {
+ MOZ_ASSERT(keyNonZero(l));
+
+ if (usingTable()) {
+ return AddPtr(table_.lookupForAdd(l));
+ }
+
+ InlineEntry* end = inlineEnd();
+ for (InlineEntry* it = inlineStart(); it != end; ++it) {
+ if (it->key && HashPolicy::match(it->key, l)) {
+ return AddPtr(it, true);
+ }
+ }
+
+ // The add pointer that's returned here may indicate the limit entry of
+ // the linear space, in which case the |add| operation will initialize
+ // the table if necessary and add the entry there.
+ return AddPtr(inlineEnd(), false);
+ }
+
+ template <typename KeyInput, typename... Args>
+ [[nodiscard]] MOZ_ALWAYS_INLINE bool add(AddPtr& p, KeyInput&& key,
+ Args&&... args) {
+ MOZ_ASSERT(!p);
+ MOZ_ASSERT(keyNonZero(key));
+
+ if (p.isInlinePtr_) {
+ InlineEntry* addPtr = p.inlAddPtr_;
+ MOZ_ASSERT(addPtr == inlineEnd());
+
+ // Switch to table mode before adding this entry.
+ if (addPtr == inlineStart() + InlineEntries) {
+ if (!switchToTable()) {
+ return false;
+ }
+ return table_.putNew(std::forward<KeyInput>(key),
+ std::forward<Args>(args)...);
+ }
+
+ MOZ_ASSERT(!p.found());
+ MOZ_ASSERT(uintptr_t(inlineEnd()) == uintptr_t(p.inlAddPtr_));
+
+ if (!this->checkSimulatedOOM()) {
+ return false;
+ }
+
+ addPtr->update(std::forward<KeyInput>(key), std::forward<Args>(args)...);
+ ++inlCount_;
+ ++inlNext_;
+ return true;
+ }
+
+ return table_.add(p.tableAddPtr_, std::forward<KeyInput>(key),
+ std::forward<Args>(args)...);
+ }
+
+ void remove(Ptr& p) {
+ MOZ_ASSERT(p);
+ if (p.isInlinePtr_) {
+ MOZ_ASSERT(inlCount_ > 0);
+ MOZ_ASSERT(!KeyPolicy::isTombstone(p.inlPtr_->key));
+ KeyPolicy::setToTombstone(p.inlPtr_->key);
+ --inlCount_;
+ return;
+ }
+ MOZ_ASSERT(usingTable());
+ table_.remove(p.tablePtr_);
+ }
+
+ void remove(const Lookup& l) {
+ if (Ptr p = lookup(l)) {
+ remove(p);
+ }
+ }
+
+ class Range {
+ friend class InlineTable;
+
+ protected:
+ mozilla::Maybe<TableRange> tableRange_; // `Nothing` if `isInline_==true`
+ InlineEntry* cur_;
+ InlineEntry* end_;
+ bool isInline_;
+
+ explicit Range(TableRange r)
+ : tableRange_(mozilla::Some(r)),
+ cur_(nullptr),
+ end_(nullptr),
+ isInline_(false) {
+ MOZ_ASSERT(!isInlineRange());
+ }
+
+ Range(const InlineEntry* begin, const InlineEntry* end)
+ : tableRange_(mozilla::Nothing()),
+ cur_(const_cast<InlineEntry*>(begin)),
+ end_(const_cast<InlineEntry*>(end)),
+ isInline_(true) {
+ advancePastNulls(cur_);
+ MOZ_ASSERT(isInlineRange());
+ }
+
+ bool assertInlineRangeInvariants() const {
+ MOZ_ASSERT(uintptr_t(cur_) <= uintptr_t(end_));
+ MOZ_ASSERT_IF(cur_ != end_, !KeyPolicy::isTombstone(cur_->key));
+ return true;
+ }
+
+ bool isInlineRange() const {
+ MOZ_ASSERT_IF(isInline_, assertInlineRangeInvariants());
+ return isInline_;
+ }
+
+ void advancePastNulls(InlineEntry* begin) {
+ InlineEntry* newCur = begin;
+ while (newCur < end_ && KeyPolicy::isTombstone(newCur->key)) {
+ ++newCur;
+ }
+ MOZ_ASSERT(uintptr_t(newCur) <= uintptr_t(end_));
+ cur_ = newCur;
+ }
+
+ void bumpCurPtr() {
+ MOZ_ASSERT(isInlineRange());
+ advancePastNulls(cur_ + 1);
+ }
+
+ public:
+ bool empty() const {
+ return isInlineRange() ? cur_ == end_ : tableRange_->empty();
+ }
+
+ Entry front() {
+ MOZ_ASSERT(!empty());
+ if (isInlineRange()) {
+ return Entry(cur_);
+ }
+ return Entry(&tableRange_->front());
+ }
+
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ if (isInlineRange()) {
+ bumpCurPtr();
+ } else {
+ tableRange_->popFront();
+ }
+ }
+ };
+
+ Range all() const {
+ return usingTable() ? Range(table_.all())
+ : Range(inlineStart(), inlineEnd());
+ }
+};
+
+} // namespace detail
+
+// A map with InlineEntries number of entries kept inline in an array.
+//
+// The Key type must be zeroable as zeros are used as tombstone keys.
+// The Value type must have a default constructor.
+//
+// The API is very much like HashMap's.
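+//
+// Example usage (an illustrative sketch only; the key type and policies are
+// hypothetical choices):
+//
+//   using ScriptCountMap =
+//       js::InlineMap<JSScript*, uint32_t, 4, DefaultHasher<JSScript*>,
+//                     SystemAllocPolicy>;
+//   ScriptCountMap counts;
+//   auto p = counts.lookupForAdd(script);
+//   if (p) {
+//     p->value()++;
+//   } else if (!counts.add(p, script, 1)) {
+//     return false;  // OOM
+//   }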
+template <typename Key, typename Value, size_t InlineEntries,
+ typename HashPolicy = DefaultHasher<Key>,
+ typename AllocPolicy = TempAllocPolicy,
+ typename KeyPolicy = detail::DefaultKeyPolicy<Key>>
+class InlineMap {
+ using Map = HashMap<Key, Value, HashPolicy, AllocPolicy>;
+
+ struct InlineEntry {
+ Key key;
+ Value value;
+
+ template <typename KeyInput, typename ValueInput>
+ void update(KeyInput&& key, ValueInput&& value) {
+ this->key = std::forward<KeyInput>(key);
+ this->value = std::forward<ValueInput>(value);
+ }
+
+ [[nodiscard]] bool moveTo(Map& map) {
+ return map.putNew(std::move(key), std::move(value));
+ }
+ };
+
+ class Entry {
+ using MapEntry = typename Map::Entry;
+
+ MapEntry* mapEntry_;
+ InlineEntry* inlineEntry_;
+
+ public:
+ Entry() = default;
+
+ explicit Entry(MapEntry* mapEntry)
+ : mapEntry_(mapEntry), inlineEntry_(nullptr) {}
+
+ explicit Entry(InlineEntry* inlineEntry)
+ : mapEntry_(nullptr), inlineEntry_(inlineEntry) {}
+
+ const Key& key() const {
+ MOZ_ASSERT(!!mapEntry_ != !!inlineEntry_);
+ if (mapEntry_) {
+ return mapEntry_->key();
+ }
+ return inlineEntry_->key;
+ }
+
+ Value& value() {
+ MOZ_ASSERT(!!mapEntry_ != !!inlineEntry_);
+ if (mapEntry_) {
+ return mapEntry_->value();
+ }
+ return inlineEntry_->value;
+ }
+ };
+
+ using Impl = detail::InlineTable<InlineEntry, Entry, Map, HashPolicy,
+ AllocPolicy, KeyPolicy, InlineEntries>;
+
+ Impl impl_;
+
+ public:
+ using Table = Map;
+ using Ptr = typename Impl::Ptr;
+ using AddPtr = typename Impl::AddPtr;
+ using Range = typename Impl::Range;
+ using Lookup = typename HashPolicy::Lookup;
+
+ static const size_t SizeOfInlineEntries = Impl::SizeOfInlineEntries;
+
+ explicit InlineMap(AllocPolicy a = AllocPolicy()) : impl_(std::move(a)) {}
+
+ size_t count() const { return impl_.count(); }
+
+ bool empty() const { return impl_.empty(); }
+
+ void clear() { impl_.clear(); }
+
+ Range all() const { return impl_.all(); }
+
+ MOZ_ALWAYS_INLINE
+ Ptr lookup(const Lookup& l) { return impl_.lookup(l); }
+
+ MOZ_ALWAYS_INLINE
+ bool has(const Lookup& l) const {
+ return const_cast<InlineMap*>(this)->lookup(l).found();
+ }
+
+ MOZ_ALWAYS_INLINE
+ AddPtr lookupForAdd(const Lookup& l) { return impl_.lookupForAdd(l); }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] MOZ_ALWAYS_INLINE bool add(AddPtr& p, KeyInput&& key,
+ ValueInput&& value) {
+ return impl_.add(p, std::forward<KeyInput>(key),
+ std::forward<ValueInput>(value));
+ }
+
+ template <typename KeyInput, typename ValueInput>
+ [[nodiscard]] bool put(KeyInput&& key, ValueInput&& value) {
+ AddPtr p = lookupForAdd(key);
+ if (p) {
+ p->value() = std::forward<ValueInput>(value);
+ return true;
+ }
+ return add(p, std::forward<KeyInput>(key), std::forward<ValueInput>(value));
+ }
+
+ void remove(Ptr& p) { impl_.remove(p); }
+
+ void remove(const Lookup& l) { impl_.remove(l); }
+};
+
+// A set with InlineEntries number of entries kept inline in an array.
+//
+// The T type must be zeroable as zeros are used as tombstone keys.
+// The T type must have a default constructor.
+//
+// The API is very much like HashSet's.
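+//
+// Example usage (an illustrative sketch only; the element type and policies
+// are hypothetical choices):
+//
+//   using SeenShapes =
+//       js::InlineSet<Shape*, 8, DefaultHasher<Shape*>, SystemAllocPolicy>;
+//   SeenShapes seen;
+//   if (!seen.put(shape)) {
+//     return false;  // OOM
+//   }
+//   MOZ_ASSERT(seen.has(shape));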
+template <typename T, size_t InlineEntries,
+ typename HashPolicy = DefaultHasher<T>,
+ typename AllocPolicy = TempAllocPolicy,
+ typename KeyPolicy = detail::DefaultKeyPolicy<T>>
+class InlineSet {
+ using Set = HashSet<T, HashPolicy, AllocPolicy>;
+
+ struct InlineEntry {
+ T key;
+
+ template <typename TInput>
+ void update(TInput&& key) {
+ this->key = std::forward<TInput>(key);
+ }
+
+ [[nodiscard]] bool moveTo(Set& set) { return set.putNew(std::move(key)); }
+ };
+
+ class Entry {
+ using SetEntry = typename Set::Entry;
+
+ SetEntry* setEntry_;
+ InlineEntry* inlineEntry_;
+
+ public:
+ Entry() = default;
+
+ explicit Entry(const SetEntry* setEntry)
+ : setEntry_(const_cast<SetEntry*>(setEntry)), inlineEntry_(nullptr) {}
+
+ explicit Entry(InlineEntry* inlineEntry)
+ : setEntry_(nullptr), inlineEntry_(inlineEntry) {}
+
+ operator T() const {
+ MOZ_ASSERT(!!setEntry_ != !!inlineEntry_);
+ if (setEntry_) {
+ return *setEntry_;
+ }
+ return inlineEntry_->key;
+ }
+ };
+
+ using Impl = detail::InlineTable<InlineEntry, Entry, Set, HashPolicy,
+ AllocPolicy, KeyPolicy, InlineEntries>;
+
+ Impl impl_;
+
+ public:
+ using Table = Set;
+ using Ptr = typename Impl::Ptr;
+ using AddPtr = typename Impl::AddPtr;
+ using Range = typename Impl::Range;
+ using Lookup = typename HashPolicy::Lookup;
+
+ static const size_t SizeOfInlineEntries = Impl::SizeOfInlineEntries;
+
+ explicit InlineSet(AllocPolicy a = AllocPolicy()) : impl_(std::move(a)) {}
+
+ size_t count() const { return impl_.count(); }
+
+ bool empty() const { return impl_.empty(); }
+
+ void clear() { impl_.clear(); }
+
+ Range all() const { return impl_.all(); }
+
+ MOZ_ALWAYS_INLINE
+ Ptr lookup(const Lookup& l) { return impl_.lookup(l); }
+
+ MOZ_ALWAYS_INLINE
+ bool has(const Lookup& l) const {
+ return const_cast<InlineSet*>(this)->lookup(l).found();
+ }
+
+ MOZ_ALWAYS_INLINE
+ AddPtr lookupForAdd(const Lookup& l) { return impl_.lookupForAdd(l); }
+
+ template <typename TInput>
+ [[nodiscard]] MOZ_ALWAYS_INLINE bool add(AddPtr& p, TInput&& key) {
+ return impl_.add(p, std::forward<TInput>(key));
+ }
+
+ template <typename TInput>
+ [[nodiscard]] bool put(TInput&& key) {
+ AddPtr p = lookupForAdd(key);
+ return p ? true : add(p, std::forward<TInput>(key));
+ }
+
+ void remove(Ptr& p) { impl_.remove(p); }
+
+ void remove(const Lookup& l) { impl_.remove(l); }
+};
+
+} // namespace js
+
+#endif // ds_InlineTable_h
diff --git a/js/src/ds/LifoAlloc.cpp b/js/src/ds/LifoAlloc.cpp
new file mode 100644
index 0000000000..8f73c04b94
--- /dev/null
+++ b/js/src/ds/LifoAlloc.cpp
@@ -0,0 +1,425 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ds/LifoAlloc.h"
+
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#ifdef LIFO_CHUNK_PROTECT
+# include "gc/Memory.h"
+#endif
+
+using namespace js;
+
+using mozilla::tl::BitSize;
+
+namespace js {
+namespace detail {
+
+/* static */
+UniquePtr<BumpChunk> BumpChunk::newWithCapacity(size_t size) {
+ MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
+ void* mem = js_malloc(size);
+ if (!mem) {
+ return nullptr;
+ }
+
+ UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));
+
+ // We assume that LIFO_ALLOC_ALIGN is no stricter than the alignment provided
+ // by the underlying memory allocator -- creating a new BumpChunk should
+ // always satisfy the LIFO_ALLOC_ALIGN alignment constraint.
+ MOZ_ASSERT(AlignPtr(result->begin()) == result->begin());
+ return result;
+}
+
+#ifdef LIFO_CHUNK_PROTECT
+
+static uint8_t* AlignPtrUp(uint8_t* ptr, uintptr_t align) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+ uintptr_t uptr = uintptr_t(ptr);
+ uintptr_t diff = uptr & (align - 1);
+ diff = (align - diff) & (align - 1);
+ uptr = uptr + diff;
+ return (uint8_t*)uptr;
+}
+
+static uint8_t* AlignPtrDown(uint8_t* ptr, uintptr_t align) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+ uintptr_t uptr = uintptr_t(ptr);
+ uptr = uptr & ~(align - 1);
+ return (uint8_t*)uptr;
+}
+
+void BumpChunk::setReadOnly() {
+ uintptr_t pageSize = gc::SystemPageSize();
+ // The allocated chunks might not be aligned on page boundaries. This code
+ // is used to ensure that we are changing the memory protection of pointers
+ // which are within the range of the BumpChunk, or that the range formed by
+ // [b .. e] is empty.
+ uint8_t* b = base();
+ uint8_t* e = capacity_;
+ b = AlignPtrUp(b, pageSize);
+ e = AlignPtrDown(e, pageSize);
+ if (e <= b) {
+ return;
+ }
+ gc::MakePagesReadOnly(b, e - b);
+}
+
+void BumpChunk::setReadWrite() {
+ uintptr_t pageSize = gc::SystemPageSize();
+ // The allocated chunks might not be aligned on page boundaries. This code
+ // is used to ensure that we are changing the memory protection of pointers
+ // which are within the range of the BumpChunk, or that the range formed by
+ // [b .. e] is empty.
+ uint8_t* b = base();
+ uint8_t* e = capacity_;
+ b = AlignPtrUp(b, pageSize);
+ e = AlignPtrDown(e, pageSize);
+ if (e <= b) {
+ return;
+ }
+ gc::UnprotectPages(b, e - b);
+}
+
+#endif
+
+} // namespace detail
+} // namespace js
+
+void LifoAlloc::reset(size_t defaultChunkSize) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));
+
+ while (!chunks_.empty()) {
+ chunks_.popFirst();
+ }
+ while (!oversize_.empty()) {
+ oversize_.popFirst();
+ }
+ while (!unused_.empty()) {
+ unused_.popFirst();
+ }
+ defaultChunkSize_ = defaultChunkSize;
+ oversizeThreshold_ = defaultChunkSize;
+ markCount = 0;
+ curSize_ = 0;
+ smallAllocsSize_ = 0;
+}
+
+void LifoAlloc::freeAll() {
+ // When freeing all chunks, we can no longer determine which chunks were
+ // transferred and which were not, so simply clear the heuristic to zero
+ // right away.
+ smallAllocsSize_ = 0;
+
+ while (!chunks_.empty()) {
+ UniqueBumpChunk bc = chunks_.popFirst();
+ decrementCurSize(bc->computedSizeOfIncludingThis());
+ }
+ while (!oversize_.empty()) {
+ UniqueBumpChunk bc = oversize_.popFirst();
+ decrementCurSize(bc->computedSizeOfIncludingThis());
+ }
+ while (!unused_.empty()) {
+ UniqueBumpChunk bc = unused_.popFirst();
+ decrementCurSize(bc->computedSizeOfIncludingThis());
+ }
+
+ // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
+ // excellent sanity check.
+ MOZ_ASSERT(curSize_ == 0);
+}
+
+// Round up to the actual allocation granularity used by malloc.
+static size_t MallocGoodSize(size_t aSize) {
+#if defined(MOZ_MEMORY)
+ return malloc_good_size(aSize);
+#else
+ return aSize;
+#endif
+}
+
+// Heuristic to choose the size of the next BumpChunk for small allocations.
+// `start` is the size of the first chunk. `used` is the total size of all
+// BumpChunks in this LifoAlloc so far.
+static size_t NextSize(size_t start, size_t used) {
+ // Double the size, up to 1 MB.
+ const size_t mb = 1 * 1024 * 1024;
+ if (used < mb) {
+ return std::max(start, used);
+ }
+
+ // After 1 MB, grow more gradually, to waste less memory.
+ // The sequence (in megabytes) begins:
+ // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
+ return RoundUp(used / 8, mb);
+}
+
+LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
+ bool oversize) {
+ MOZ_ASSERT(fallibleScope_,
+ "[OOM] Cannot allocate a new chunk in an infallible scope.");
+
+ // Compute the size which should be requested in order to be able to fit |n|
+ // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
+
+ size_t minSize;
+ if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
+ (minSize & (size_t(1) << (BitSize<size_t>::value - 1))))) {
+ return nullptr;
+ }
+
+ // Note: When computing chunkSize growth, we are only interested in chunks
+ // used for small allocations. This excludes unused chunks, oversized chunks,
+ // and chunks transferred in from another LifoAlloc.
+ MOZ_ASSERT(curSize_ >= smallAllocsSize_);
+ const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
+ ? MallocGoodSize(minSize)
+ : NextSize(defaultChunkSize_, smallAllocsSize_);
+
+ // Create a new BumpChunk, and allocate space for it.
+ UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
+ if (!result) {
+ return nullptr;
+ }
+ MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
+ return result;
+}
+
+LifoAlloc::UniqueBumpChunk LifoAlloc::getOrCreateChunk(size_t n) {
+ // Look for existing unused BumpChunks to satisfy the request, and pick the
+ // first one which is large enough, and move it into the list of used
+ // chunks.
+ if (!unused_.empty()) {
+ if (unused_.begin()->canAlloc(n)) {
+ return unused_.popFirst();
+ }
+
+ BumpChunkList::Iterator e(unused_.end());
+ for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get();
+ ++i) {
+ detail::BumpChunk* elem = i->next();
+ MOZ_ASSERT(elem->empty());
+ if (elem->canAlloc(n)) {
+ BumpChunkList temp = unused_.splitAfter(i.get());
+ UniqueBumpChunk newChunk = temp.popFirst();
+ unused_.appendAll(std::move(temp));
+ return newChunk;
+ }
+ }
+ }
+
+ // Allocate a new BumpChunk with enough space for the next allocation.
+ UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
+ if (!newChunk) {
+ return newChunk;
+ }
+ incrementCurSize(newChunk->computedSizeOfIncludingThis());
+ return newChunk;
+}
+
+void* LifoAlloc::allocImplColdPath(size_t n) {
+ void* result;
+ UniqueBumpChunk newChunk = getOrCreateChunk(n);
+ if (!newChunk) {
+ return nullptr;
+ }
+
+ // This new chunk is about to be used for small allocations.
+ smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();
+
+ // Since we just created a large enough chunk, this can't fail.
+ chunks_.append(std::move(newChunk));
+ result = chunks_.last()->tryAlloc(n);
+ MOZ_ASSERT(result);
+ return result;
+}
+
+void* LifoAlloc::allocImplOversize(size_t n) {
+ void* result;
+ UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
+ if (!newChunk) {
+ return nullptr;
+ }
+ incrementCurSize(newChunk->computedSizeOfIncludingThis());
+
+ // Since we just created a large enough chunk, this can't fail.
+ oversize_.append(std::move(newChunk));
+ result = oversize_.last()->tryAlloc(n);
+ MOZ_ASSERT(result);
+ return result;
+}
+
+bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total) {
+ for (detail::BumpChunk& bc : unused_) {
+ total += bc.unused();
+ if (total >= n) {
+ return true;
+ }
+ }
+
+ UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
+ if (!newChunk) {
+ return false;
+ }
+ incrementCurSize(newChunk->computedSizeOfIncludingThis());
+ unused_.pushFront(std::move(newChunk));
+ return true;
+}
+
+LifoAlloc::Mark LifoAlloc::mark() {
+ markCount++;
+ Mark res;
+ if (!chunks_.empty()) {
+ res.chunk = chunks_.last()->mark();
+ }
+ if (!oversize_.empty()) {
+ res.oversize = oversize_.last()->mark();
+ }
+ return res;
+}
+
+void LifoAlloc::release(Mark mark) {
+ markCount--;
+#ifdef DEBUG
+ auto assertIsContained = [](const detail::BumpChunk::Mark& m,
+ BumpChunkList& list) {
+ if (m.markedChunk()) {
+ bool contained = false;
+ for (const detail::BumpChunk& chunk : list) {
+ if (&chunk == m.markedChunk() && chunk.contains(m)) {
+ contained = true;
+ break;
+ }
+ }
+ MOZ_ASSERT(contained);
+ }
+ };
+ assertIsContained(mark.chunk, chunks_);
+ assertIsContained(mark.oversize, oversize_);
+#endif
+
+ BumpChunkList released;
+ auto cutAtMark = [&released](const detail::BumpChunk::Mark& m,
+ BumpChunkList& list) {
+ // Move the blocks which are after the mark to the set of released chunks.
+ if (!m.markedChunk()) {
+ released = std::move(list);
+ } else {
+ released = list.splitAfter(m.markedChunk());
+ }
+
+ // Release everything which follows the mark in the last chunk.
+ if (!list.empty()) {
+ list.last()->release(m);
+ }
+ };
+
+ // Release the content of all the blocks which are after the marks, and keep
+ // blocks as unused.
+ cutAtMark(mark.chunk, chunks_);
+ for (detail::BumpChunk& bc : released) {
+ bc.release();
+
+ // Chunks moved from (after a mark) in chunks_ to unused_ are no longer
+ // considered small allocations.
+ smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
+ }
+ unused_.appendAll(std::move(released));
+
+ // Free the content of all the blocks which are after the marks.
+ cutAtMark(mark.oversize, oversize_);
+ while (!released.empty()) {
+ UniqueBumpChunk bc = released.popFirst();
+ decrementCurSize(bc->computedSizeOfIncludingThis());
+ }
+}
+
+void LifoAlloc::steal(LifoAlloc* other) {
+ MOZ_ASSERT(!other->markCount);
+ MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
+ MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
+ MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());
+
+ // Copy everything from |other| to |this| except for |peakSize_|, which
+ // requires some care.
+ chunks_ = std::move(other->chunks_);
+ oversize_ = std::move(other->oversize_);
+ unused_ = std::move(other->unused_);
+ markCount = other->markCount;
+ defaultChunkSize_ = other->defaultChunkSize_;
+ oversizeThreshold_ = other->oversizeThreshold_;
+ curSize_ = other->curSize_;
+ peakSize_ = std::max(peakSize_, other->peakSize_);
+ smallAllocsSize_ = other->smallAllocsSize_;
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ fallibleScope_ = other->fallibleScope_;
+#endif
+
+ other->reset(defaultChunkSize_);
+}
+
+void LifoAlloc::transferFrom(LifoAlloc* other) {
+ MOZ_ASSERT(!markCount);
+ MOZ_ASSERT(!other->markCount);
+
+ // Transferred chunks are not counted as part of |smallAllocsSize| as this
+ // could introduce bias in the |NextSize| heuristics, leading to
+ // over-allocations in *this* LifoAlloc. As well, to avoid interference with
+ // small allocations made with |this|, the last chunk of the |chunks_| list
+ // should remain the last chunk. Therefore, the transferred chunks are
+ // prepended to the |chunks_| list.
+ incrementCurSize(other->curSize_);
+
+ appendUnused(std::move(other->unused_));
+ chunks_.prependAll(std::move(other->chunks_));
+ oversize_.prependAll(std::move(other->oversize_));
+ other->curSize_ = 0;
+ other->smallAllocsSize_ = 0;
+}
+
+void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
+ MOZ_ASSERT(!markCount);
+
+ size_t size = 0;
+ for (detail::BumpChunk& bc : other->unused_) {
+ size += bc.computedSizeOfIncludingThis();
+ }
+
+ appendUnused(std::move(other->unused_));
+ incrementCurSize(size);
+ other->decrementCurSize(size);
+}
+
+#ifdef LIFO_CHUNK_PROTECT
+void LifoAlloc::setReadOnly() {
+ for (detail::BumpChunk& bc : chunks_) {
+ bc.setReadOnly();
+ }
+ for (detail::BumpChunk& bc : oversize_) {
+ bc.setReadOnly();
+ }
+ for (detail::BumpChunk& bc : unused_) {
+ bc.setReadOnly();
+ }
+}
+
+void LifoAlloc::setReadWrite() {
+ for (detail::BumpChunk& bc : chunks_) {
+ bc.setReadWrite();
+ }
+ for (detail::BumpChunk& bc : oversize_) {
+ bc.setReadWrite();
+ }
+ for (detail::BumpChunk& bc : unused_) {
+ bc.setReadWrite();
+ }
+}
+#endif
diff --git a/js/src/ds/LifoAlloc.h b/js/src/ds/LifoAlloc.h
new file mode 100644
index 0000000000..872a10c86f
--- /dev/null
+++ b/js/src/ds/LifoAlloc.h
@@ -0,0 +1,1196 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_LifoAlloc_h
+#define ds_LifoAlloc_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TemplateLib.h"
+
+#include <algorithm>
+#include <new>
+#include <stddef.h> // size_t
+#include <type_traits>
+#include <utility>
+
+// [SMDOC] LifoAlloc bump allocator
+//
+// This file defines an allocator named LifoAlloc, a bump allocator which
+// makes allocation fast but is not able to reclaim individual allocations.
+//
+// * Allocation principle
+//
+// In practice a LifoAlloc is implemented using a list of BumpChunks, which are
+// contiguous memory areas which are chained in a single linked list.
+//
+// When an allocation is performed, we check if there is space in the last
+// chunk. If there is, we bump the pointer of the last chunk and return the
+// previous value of the pointer. Otherwise we allocate a new chunk which is
+// large enough and perform the allocation the same way.
+//
+// Each allocation is made with 2 main helper functions,
+// BumpChunk::nextAllocBase and BumpChunk::nextAllocEnd. These functions avoid
+// duplicating logic such as allocating, checking whether an allocation fits,
+// or reserving a given buffer space. They align the pointer for the next
+// allocation (8-byte aligned), and also reserve red-zones to improve the
+// reports of our security instrumentation. (see Security features below)
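+//
+// For example, with the 8-byte alignment and, in instrumented builds, the
+// 16-byte red-zone, a 20-byte request arriving when the bump pointer sits at
+// chunk offset 0x34 would return memory at offset 0x38 and move the bump
+// pointer to 0x38 + 20 + 16 = 0x5c.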
+//
+// The chunk sizes follow the heuristic implemented in NextSize
+// (LifoAlloc.cpp), which doubles the size until we reach 1 MB and then
+// continues with a smaller geometric series. This heuristic is meant to reduce
+// the number of allocations, such that we spend less time allocating/freeing
+// chunks of a few KB at a time.
+//
+// ** Oversize allocations
+//
+// When allocating with a LifoAlloc, we distinguish 2 different kinds of
+// allocations, the small allocations and the large allocations. The reason for
+// splitting in 2 sets is to avoid wasting memory.
+//
+// If you had a single linked list of chunks, then making oversized allocations
+// can cause chunks to contain a lot of wasted space as new chunks would have to
+// be allocated to fit these allocations, and the space of the previous chunk
+// would remain unused.
+//
+// The oversize allocation path can be disabled, or its threshold customized,
+// with disableOversize and setOversizeThreshold; the threshold must not
+// exceed the default chunk size with which the LifoAlloc was initialized.
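+//
+// A sketch (sizes are illustrative):
+//
+//   LifoAlloc alloc(4 * 1024);          // 4 KB default chunk size
+//   alloc.setOversizeThreshold(1024);   // allocations > 1 KB get own chunks
+//   // or: alloc.disableOversize();     // never take the oversize path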
+//
+// ** LifoAllocScope (mark & release)
+//
+// As the memory cannot be reclaimed except when the LifoAlloc structure is
+// deleted, the LifoAllocScope structure is used to create scopes, related to a
+// stacked task. When going out of a LifoAllocScope the memory associated to the
+// scope is marked as unused but not reclaimed. This implies that the memory
+// allocated for one task can be reused for a similar task later on. (see
+// Safety)
+//
+// LifoAllocScope is based on mark and release functions. The mark function is
+// used to recall the offsets at which a LifoAllocScope got created. The release
+// function takes the Mark as input and will flag all memory allocated after the
+// mark creation as unused.
+//
+// When releasing all the memory of BumpChunks, these are moved to a list of
+// unused chunks which will later be reused by new allocations.
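+//
+// A sketch of the underlying pattern (this is what LifoAllocScope automates):
+//
+//   LifoAlloc::Mark m = alloc.mark();
+//   // ... perform temporary allocations in |alloc| ...
+//   alloc.release(m);  // the memory is kept around as unused chunks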
+//
+// A bump chunk allocator normally has a single bump pointer, whereas we have
+// 2 (see Oversize allocations). By doing so, we lose the total ordering of
+// allocations that a single linked list of allocations would provide.
+//
+// However, we rely on the ordering of allocations with LifoAllocScope, i.e.
+// when the mark and release functions are used. Thus LifoAlloc::Mark is
+// composed of 2 marks, one for each singly linked list of allocations, to
+// keep both lists of allocations ordered.
+//
+// ** Infallible Allocator
+//
+// LifoAlloc can also be used as an infallible allocator. This requires the user
+// to periodically ensure that enough space has been reserved to satisfy the
+// upcoming set of allocations by calling LifoAlloc::ensureUnusedApproximate or
+// LifoAlloc::allocEnsureUnused functions. Between 2 calls of these functions,
+// functions such as allocInfallible can be used without checking against
+// nullptr, as long as there is a bounded number of such calls and that all
+// allocations including their red-zone fit in the reserved space.
+//
+// The infallible allocator mode can be made the default by calling
+// setAsInfallibleByDefault, in which case an AutoFallibleScope should be used
+// to make any large allocations. Failing to do so will raise an issue when
+// running the LifoAlloc under the OOM Simulator. (see Security features)
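+//
+// A sketch of the expected pattern (names and sizes are illustrative):
+//
+//   if (!alloc.ensureUnusedApproximate(expectedBytes)) {
+//     return false;  // OOM
+//   }
+//   void* storage = alloc.allocInfallible(bytesForOneNode);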
+//
+// * LifoAlloc::Enum Iterator
+//
+// A LifoAlloc is used for backing the store-buffer of the Garbage Collector
+// (GC). The store-buffer is appending data as it is being reported during
+// incremental GC. The LifoAlloc::Enum class is used for iterating over the set
+// of allocations made within the LifoAlloc.
+//
+// However, one must take extra care to use the proper associated types for
+// the data which is written to and read out of the LifoAlloc. The iterator
+// reuses the same logic as the allocator in order to skip red-zones.
+//
+// At the moment, the iterator will cause a hard failure if any oversize
+// allocations are made.
+//
+// * Safety
+//
+// A LifoAlloc is neither thread-safe nor interrupt-safe. It should only be
+// manipulated in one thread of execution at a time. It can be transferred from
+// one thread to another but should not be used concurrently.
+//
+// When using LifoAllocScope, no pointer to the data allocated within a
+// LifoAllocScope should be stored in data allocated before the latest
+// LifoAllocScope. This kind of issue can hide in different forms, such as
+// appending to a Vector backed by a LifoAlloc, which can resize and move the
+// data below the LifoAllocScope, thus causing a use-after-free once the
+// LifoAllocScope is left.
+//
+// * Security features
+//
+// ** Single Linked List
+//
+// For sanity reasons this LifoAlloc implementation makes use of its own single
+// linked list implementation based on unique pointers (UniquePtr). The reason
+// for this is to ensure that a BumpChunk is owned only once, thus preventing
+// use-after-free issues.
+//
+// ** OOM Simulator
+//
+// The OOM simulator is controlled by the JS_OOM_BREAKPOINT macro, and used to
+// check any fallible allocation for potential OOM. Fallible functions are
+// instrumented with JS_OOM_POSSIBLY_FAIL(); function calls, and are expected to
+// return null on failures.
+//
+// Except for simulating OOMs, LifoAlloc is instrumented in DEBUG and OOM
+// Simulator builds to check for the correctness of the Infallible Allocator
+// state. When using a LifoAlloc as an infallible allocator, enough space should
+// always be reserved for the next allocations. Therefore, to check this
+// invariant LifoAlloc::newChunkWithCapacity checks that any new chunks are
+// allocated within a fallible scope, under AutoFallibleScope.
+//
+// ** Address Sanitizers & Valgrind
+//
+// When manipulating memory in a LifoAlloc, the memory remains contiguous and
+// therefore subject to potential buffer overflow/underflow. To check for these
+// memory corruptions, the macro LIFO_HAVE_MEM_CHECK is used to add red-zones
+// with LIFO_MAKE_MEM_NOACCESS and LIFO_MAKE_MEM_UNDEFINED.
+//
+// The red-zone is a minimum space left in between 2 allocations. Any access to
+// these red-zones should warn in both valgrind / ASan builds.
+//
+// The red-zone size is defined by BumpChunk::RedZoneSize and defaults to 0 if
+// no instrumentation is expected, and to 16 otherwise.
+//
+// ** Magic Number
+//
+// A simple sanity check is present in all BumpChunks in the form of a
+// constant field which is never mutated. The BumpChunk::magic_ field is
+// initialized to the "Lif" string. Any mutation of this value indicates a
+// memory corruption.
+//
+// This magic number is enabled in all MOZ_DIAGNOSTIC_ASSERT_ENABLED builds,
+// which implies that all Nightly and dev-edition versions of
+// Firefox/SpiderMonkey contain this instrumentation.
+//
+// ** Memory protection
+//
+// LifoAlloc chunks can hold a lot of memory. When the memory is known to be
+// unused and unchanged for some period of time, such as when moving from one
+// thread to another, it can be set as read-only with LifoAlloc::setReadOnly
+// and reset as read-write with LifoAlloc::setReadWrite.
+//
+// This code is guarded by LIFO_CHUNK_PROTECT and at the moment only enabled
+// in DEBUG builds, in order to avoid fragmentation of the TLB, which might
+// cause mprotect calls to fail with out-of-memory.
+//
+
+#include "js/UniquePtr.h"
+#include "util/Memory.h"
+#include "util/Poison.h"
+
+namespace js {
+
+namespace detail {
+
+template <typename T, typename D>
+class SingleLinkedList;
+
+template <typename T, typename D = JS::DeletePolicy<T>>
+class SingleLinkedListElement {
+ friend class SingleLinkedList<T, D>;
+ js::UniquePtr<T, D> next_;
+
+ public:
+ SingleLinkedListElement() : next_(nullptr) {}
+ ~SingleLinkedListElement() { MOZ_ASSERT(!next_); }
+
+ T* next() const { return next_.get(); }
+};
+
+// Single linked list which uses UniquePtr to hold the next pointers.
+// UniquePtr is used to ensure that none of the elements are used
+// simultaneously in 2 different lists.
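+//
+// For example (an illustrative sketch; |Node| is a hypothetical type deriving
+// from SingleLinkedListElement<Node>, and |a|, |b| are UniquePtr<Node>):
+//
+//   SingleLinkedList<Node> list;
+//   list.append(std::move(a));
+//   list.pushFront(std::move(b));
+//   UniquePtr<Node> front = list.popFirst();  // |b| comes out first
+//   UniquePtr<Node> rest = list.popFirst();   // then |a|; the list must be
+//                                             // empty before destruction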
+template <typename T, typename D = JS::DeletePolicy<T>>
+class SingleLinkedList {
+ private:
+ // First element of the list, which owns the next element and ensures
+ // that this list is the only owner of that element.
+ UniquePtr<T, D> head_;
+
+ // Weak pointer to the last element of the list.
+ T* last_;
+
+ void assertInvariants() {
+ MOZ_ASSERT(bool(head_) == bool(last_));
+ MOZ_ASSERT_IF(last_, !last_->next_);
+ }
+
+ public:
+ SingleLinkedList() : head_(nullptr), last_(nullptr) { assertInvariants(); }
+
+ SingleLinkedList(SingleLinkedList&& other)
+ : head_(std::move(other.head_)), last_(other.last_) {
+ other.last_ = nullptr;
+ assertInvariants();
+ other.assertInvariants();
+ }
+
+ ~SingleLinkedList() {
+ MOZ_ASSERT(!head_);
+ MOZ_ASSERT(!last_);
+ }
+
+ // Move the elements of the |other| list in the current one, and implicitly
+ // remove all the elements of the current list.
+ SingleLinkedList& operator=(SingleLinkedList&& other) {
+ head_ = std::move(other.head_);
+ last_ = other.last_;
+ other.last_ = nullptr;
+ assertInvariants();
+ other.assertInvariants();
+ return *this;
+ }
+
+ bool empty() const { return !last_; }
+
+ // Iterates over the elements of the list; this is used to avoid raw
+ // manipulation of pointers as much as possible.
+ class Iterator {
+ T* current_;
+
+ public:
+ explicit Iterator(T* current) : current_(current) {}
+
+ T& operator*() const { return *current_; }
+ T* operator->() const { return current_; }
+ T* get() const { return current_; }
+
+ const Iterator& operator++() {
+ current_ = current_->next();
+ return *this;
+ }
+
+ bool operator!=(const Iterator& other) const {
+ return current_ != other.current_;
+ }
+ bool operator==(const Iterator& other) const {
+ return current_ == other.current_;
+ }
+ };
+
+ Iterator begin() const { return Iterator(head_.get()); }
+ Iterator end() const { return Iterator(nullptr); }
+ Iterator last() const { return Iterator(last_); }
+
+ // Split the list into 2 single linked lists after the element given as
+ // argument. The front of the list remains in the current list, while the
+ // back goes into the newly created linked list.
+ //
+ // This is used for example to extract one element from a list. (see
+ // LifoAlloc::getOrCreateChunk)
+ SingleLinkedList splitAfter(T* newLast) {
+ MOZ_ASSERT(newLast);
+ SingleLinkedList result;
+ if (newLast->next_) {
+ result.head_ = std::move(newLast->next_);
+ result.last_ = last_;
+ last_ = newLast;
+ }
+ assertInvariants();
+ result.assertInvariants();
+ return result;
+ }
+
+ void pushFront(UniquePtr<T, D>&& elem) {
+ if (!last_) {
+ last_ = elem.get();
+ }
+ elem->next_ = std::move(head_);
+ head_ = std::move(elem);
+ assertInvariants();
+ }
+
+ void append(UniquePtr<T, D>&& elem) {
+ if (last_) {
+ last_->next_ = std::move(elem);
+ last_ = last_->next_.get();
+ } else {
+ head_ = std::move(elem);
+ last_ = head_.get();
+ }
+ assertInvariants();
+ }
+ void appendAll(SingleLinkedList&& list) {
+ if (list.empty()) {
+ return;
+ }
+ if (last_) {
+ last_->next_ = std::move(list.head_);
+ } else {
+ head_ = std::move(list.head_);
+ }
+ last_ = list.last_;
+ list.last_ = nullptr;
+ assertInvariants();
+ list.assertInvariants();
+ }
+ void steal(SingleLinkedList&& list) {
+ head_ = std::move(list.head_);
+ last_ = list.last_;
+ list.last_ = nullptr;
+ assertInvariants();
+ list.assertInvariants();
+ }
+ void prependAll(SingleLinkedList&& list) {
+ list.appendAll(std::move(*this));
+ steal(std::move(list));
+ }
+ UniquePtr<T, D> popFirst() {
+ MOZ_ASSERT(head_);
+ UniquePtr<T, D> result = std::move(head_);
+ head_ = std::move(result->next_);
+ if (!head_) {
+ last_ = nullptr;
+ }
+ assertInvariants();
+ return result;
+ }
+};
+
+static const size_t LIFO_ALLOC_ALIGN = 8;
+
+MOZ_ALWAYS_INLINE
+uint8_t* AlignPtr(uint8_t* orig) {
+ static_assert(mozilla::IsPowerOfTwo(LIFO_ALLOC_ALIGN),
+ "LIFO_ALLOC_ALIGN must be a power of two");
+
+ uint8_t* result = (uint8_t*)AlignBytes(uintptr_t(orig), LIFO_ALLOC_ALIGN);
+ MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
+ return result;
+}
+
+// A BumpChunk represents a single memory allocation made with the system
+// allocator. As the owner of the memory, it is responsible for the allocation
+// and the deallocation.
+//
+// This structure is only move-able, but not copyable.
+class BumpChunk : public SingleLinkedListElement<BumpChunk> {
+ private:
+ // Pointer to the first free byte in this chunk (the bump pointer).
+ uint8_t* bump_;
+ // Pointer to the first byte after this chunk.
+ uint8_t* const capacity_;
+
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ // Magic number used to check against poisoned values.
+ const uintptr_t magic_ : 24;
+ static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
+#endif
+
+#if defined(DEBUG)
+# define LIFO_CHUNK_PROTECT 1
+#endif
+
+ // Poison the memory with memset, in order to catch errors due to
+ // use-after-free, with JS_LIFO_UNDEFINED_PATTERN pattern, or to catch
+ // use-before-init with JS_LIFO_UNINITIALIZED_PATTERN.
+#if defined(DEBUG)
+# define LIFO_HAVE_MEM_CHECKS 1
+# define LIFO_MAKE_MEM_NOACCESS(addr, size) \
+ do { \
+ uint8_t* base = (addr); \
+ size_t sz = (size); \
+ MOZ_MAKE_MEM_UNDEFINED(base, sz); \
+ memset(base, JS_LIFO_UNDEFINED_PATTERN, sz); \
+ MOZ_MAKE_MEM_NOACCESS(base, sz); \
+ } while (0)
+
+# define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
+ do { \
+ uint8_t* base = (addr); \
+ size_t sz = (size); \
+ MOZ_MAKE_MEM_UNDEFINED(base, sz); \
+ memset(base, JS_LIFO_UNINITIALIZED_PATTERN, sz); \
+ MOZ_MAKE_MEM_UNDEFINED(base, sz); \
+ } while (0)
+
+#elif defined(MOZ_HAVE_MEM_CHECKS)
+# define LIFO_HAVE_MEM_CHECKS 1
+# define LIFO_MAKE_MEM_NOACCESS(addr, size) \
+ MOZ_MAKE_MEM_NOACCESS((addr), (size))
+# define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
+ MOZ_MAKE_MEM_UNDEFINED((addr), (size))
+#endif
+
+#ifdef LIFO_HAVE_MEM_CHECKS
+ // Red zone reserved after each allocation.
+ static constexpr size_t RedZoneSize = 16;
+#else
+ static constexpr size_t RedZoneSize = 0;
+#endif
+
+ void assertInvariants() {
+ MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
+ MOZ_ASSERT(begin() <= end());
+ MOZ_ASSERT(end() <= capacity_);
+ }
+
+ BumpChunk& operator=(const BumpChunk&) = delete;
+ BumpChunk(const BumpChunk&) = delete;
+
+ explicit BumpChunk(uintptr_t capacity)
+ : bump_(begin()),
+ capacity_(base() + capacity)
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ ,
+ magic_(magicNumber)
+#endif
+ {
+ assertInvariants();
+#if defined(LIFO_HAVE_MEM_CHECKS)
+ // The memory is freshly allocated and marked as undefined by the
+ // allocator of the BumpChunk. Instead, we mark this memory as
+ // no-access, as it has not been allocated within the BumpChunk.
+ LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
+#endif
+ }
+
+ // Cast |this| into a uint8_t* pointer.
+ //
+ // Warning: Are you sure you do not want to use begin() instead?
+ const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
+ uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }
+
+ // Update the bump pointer to any value contained in this chunk, which is
+ // above the private fields of this chunk.
+ //
+ // The memory is poisoned and flagged as no-access when the bump pointer is
+ // moving backward, such as when memory is released, or when a Mark is used
+ // to unwind previous allocations.
+ //
+ // The memory is flagged as undefined when the bump pointer is moving
+ // forward.
+ void setBump(uint8_t* newBump) {
+ assertInvariants();
+ MOZ_ASSERT(begin() <= newBump);
+ MOZ_ASSERT(newBump <= capacity_);
+#if defined(LIFO_HAVE_MEM_CHECKS)
+ // Poison/Unpoison memory that we just free'd/allocated.
+ if (bump_ > newBump) {
+ LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
+ } else if (newBump > bump_) {
+ MOZ_ASSERT(newBump - RedZoneSize >= bump_);
+ LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_);
+ // The area [newBump - RedZoneSize .. newBump[ is already flagged as
+ // no-access either with the previous if-branch or with the
+ // BumpChunk constructor. No need to mark it twice.
+ }
+#endif
+ bump_ = newBump;
+ }
+
+ public:
+ ~BumpChunk() { release(); }
+
+ // Returns true if this chunk contains no allocated content.
+ bool empty() const { return end() == begin(); }
+
+ // Returns the size in bytes of the allocated space. This includes the
+ // space consumed by the alignment of the allocations.
+ size_t used() const { return end() - begin(); }
+
+ // These are used for manipulating a chunk as if it was a vector of bytes,
+ // and used for iterating over the content of the buffer (see
+ // LifoAlloc::Enum)
+ inline const uint8_t* begin() const;
+ inline uint8_t* begin();
+ uint8_t* end() const { return bump_; }
+
+ // This function is the only way to allocate and construct a chunk. It
+ // returns a UniquePtr to the newly allocated chunk. The size given as
+ // argument includes the space needed for the header of the chunk.
+ static UniquePtr<BumpChunk> newWithCapacity(size_t size);
+
+ // Report allocation.
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this);
+ }
+
+ // Report allocation size.
+ size_t computedSizeOfIncludingThis() const { return capacity_ - base(); }
+
+ // Opaque type used to carry a pointer to the current location of the bump_
+ // pointer, within a BumpChunk.
+ class Mark {
+ // Chunk which owns the pointer.
+ BumpChunk* chunk_;
+ // Recorded value of the bump_ pointer of the BumpChunk.
+ uint8_t* bump_;
+
+ friend class BumpChunk;
+ Mark(BumpChunk* chunk, uint8_t* bump) : chunk_(chunk), bump_(bump) {}
+
+ public:
+ Mark() : chunk_(nullptr), bump_(nullptr) {}
+
+ BumpChunk* markedChunk() const { return chunk_; }
+ };
+
+ // Return a mark to be able to unwind future allocations with the release
+ // function. (see LifoAllocScope)
+ Mark mark() { return Mark(this, end()); }
+
+ // Check if a pointer is part of the allocated data of this chunk.
+ bool contains(const void* ptr) const {
+ // Note: We cannot check "ptr < end()" because a mark has a 0-size
+ // length.
+ return begin() <= ptr && ptr <= end();
+ }
+
+ // Check if a mark is part of the allocated data of this chunk.
+ bool contains(Mark m) const {
+ MOZ_ASSERT(m.chunk_ == this);
+ return contains(m.bump_);
+ }
+
+ // Release the memory allocated in this chunk. This function does not call
+ // any of the destructors.
+ void release() { setBump(begin()); }
+
+ // Release the memory allocated in this chunk since the corresponding mark
+ // got created. This function does not call any of the destructors.
+ void release(Mark m) {
+ MOZ_RELEASE_ASSERT(contains(m));
+ setBump(m.bump_);
+ }
+
+ // Given an amount, compute the total size of a chunk for it: reserved
+ // space before |begin()|, space for |amount| bytes, and red-zone space
+ // after those bytes that will ultimately end at |capacity_|.
+ [[nodiscard]] static inline bool allocSizeWithRedZone(size_t amount,
+ size_t* size);
+
+ // Given a bump chunk pointer, find the next base/end pointers. This is
+ // useful for having consistent allocations, and iterating over known size
+ // allocations.
+ static uint8_t* nextAllocBase(uint8_t* e) { return detail::AlignPtr(e); }
+ static uint8_t* nextAllocEnd(uint8_t* b, size_t n) {
+ return b + n + RedZoneSize;
+ }
+
+ // Returns true if the unused space is large enough for an allocation of
+ // |n| bytes.
+ bool canAlloc(size_t n) const {
+ uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n);
+ // bump_ <= newBump, is necessary to catch overflow.
+ return bump_ <= newBump && newBump <= capacity_;
+ }
+
+ // Space remaining in the current chunk.
+ size_t unused() const {
+ uint8_t* aligned = nextAllocBase(end());
+ if (aligned < capacity_) {
+ return capacity_ - aligned;
+ }
+ return 0;
+ }
+
+ // Try to perform an allocation of size |n|, returns nullptr if not possible.
+ MOZ_ALWAYS_INLINE
+ void* tryAlloc(size_t n) {
+ uint8_t* aligned = nextAllocBase(end());
+ uint8_t* newBump = nextAllocEnd(aligned, n);
+
+ if (newBump > capacity_) {
+ return nullptr;
+ }
+
+ // Check for overflow.
+ if (MOZ_UNLIKELY(newBump < bump_)) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
+ setBump(newBump);
+ return aligned;
+ }
+
+#ifdef LIFO_CHUNK_PROTECT
+ void setReadOnly();
+ void setReadWrite();
+#else
+ void setReadOnly() const {}
+ void setReadWrite() const {}
+#endif
+};
+
+// Space reserved for the BumpChunk internal data, and the alignment of the
+// first allocation content. This can be used to ensure there is enough space
+// for the next allocation (see LifoAlloc::newChunkWithCapacity).
+static constexpr size_t BumpChunkReservedSpace =
+ AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN);
+
+[[nodiscard]] /* static */ inline bool BumpChunk::allocSizeWithRedZone(
+ size_t amount, size_t* size) {
+ constexpr size_t SpaceBefore = BumpChunkReservedSpace;
+ static_assert((SpaceBefore % LIFO_ALLOC_ALIGN) == 0,
+ "reserved space presumed already aligned");
+
+ constexpr size_t SpaceAfter = RedZoneSize; // may be zero
+
+ constexpr size_t SpaceBeforeAndAfter = SpaceBefore + SpaceAfter;
+ static_assert(SpaceBeforeAndAfter >= SpaceBefore,
+ "intermediate addition must not overflow");
+
+ *size = SpaceBeforeAndAfter + amount;
+ return MOZ_LIKELY(*size >= SpaceBeforeAndAfter);
+}
+
+inline const uint8_t* BumpChunk::begin() const {
+ return base() + BumpChunkReservedSpace;
+}
+
+inline uint8_t* BumpChunk::begin() { return base() + BumpChunkReservedSpace; }
+
+} // namespace detail
+
+// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
+//
+// Note: We leave BumpChunks latent in the set of unused chunks after they've
+// been released to avoid thrashing before a GC.
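+//
+// Example usage (an illustrative sketch only):
+//
+//   LifoAlloc alloc(4 * 1024);  // 4 KB default chunk size
+//   uint32_t* elems = alloc.newArray<uint32_t>(128);
+//   if (!elems) {
+//     return false;  // OOM
+//   }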
+class LifoAlloc {
+ using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
+ using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;
+
+ // List of chunks containing allocated data of size smaller than the default
+ // chunk size. In the common case, the last chunk of this list is always
+ // used to perform the allocations. When the allocation cannot be performed,
+ // we move a Chunk from the unused set to the list of used chunks.
+ BumpChunkList chunks_;
+
+ // List of chunks containing allocated data where each allocation is larger
+ // than the oversize threshold. Each chunk contains exactly one allocation.
+ // This reduces wasted space in the chunk list.
+ //
+ // Oversize chunks are allocated on demand and freed as soon as they are
+ // released, instead of being pushed to the unused list.
+ BumpChunkList oversize_;
+
+ // Set of unused chunks, which can be reused for future allocations.
+ BumpChunkList unused_;
+
+ size_t markCount;
+ size_t defaultChunkSize_;
+ size_t oversizeThreshold_;
+
+ // Size of all chunks in chunks_, oversize_, unused_ lists.
+ size_t curSize_;
+ size_t peakSize_;
+
+ // Size of all chunks containing small bump allocations. This heuristic is
+ // used to compute growth rate while ignoring chunks such as oversized,
+ // now-unused, or transferred (which followed their own growth patterns).
+ size_t smallAllocsSize_;
+
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ bool fallibleScope_;
+#endif
+
+ void operator=(const LifoAlloc&) = delete;
+ LifoAlloc(const LifoAlloc&) = delete;
+
+ // Return a BumpChunk that can perform an allocation of at least size |n|.
+ UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);
+
+ // Reuse or allocate a BumpChunk that can perform an allocation of at least
+ // size |n|; if successful, it is placed at the end of the |chunks_| list.
+ UniqueBumpChunk getOrCreateChunk(size_t n);
+
+ void reset(size_t defaultChunkSize);
+
+ // Append unused chunks to the end of this LifoAlloc.
+ void appendUnused(BumpChunkList&& otherUnused) {
+#ifdef DEBUG
+ for (detail::BumpChunk& bc : otherUnused) {
+ MOZ_ASSERT(bc.empty());
+ }
+#endif
+ unused_.appendAll(std::move(otherUnused));
+ }
+
+ // Append used chunks to the end of this LifoAlloc. We act as if all the
+ // chunks in |this| are used, even if they're not, so memory may be wasted.
+ void appendUsed(BumpChunkList&& otherChunks) {
+ chunks_.appendAll(std::move(otherChunks));
+ }
+
+ // Track the amount of space allocated in used and unused chunks.
+ void incrementCurSize(size_t size) {
+ curSize_ += size;
+ if (curSize_ > peakSize_) {
+ peakSize_ = curSize_;
+ }
+ }
+ void decrementCurSize(size_t size) {
+ MOZ_ASSERT(curSize_ >= size);
+ curSize_ -= size;
+ MOZ_ASSERT(curSize_ >= smallAllocsSize_);
+ }
+
+ void* allocImplColdPath(size_t n);
+ void* allocImplOversize(size_t n);
+
+ MOZ_ALWAYS_INLINE
+ void* allocImpl(size_t n) {
+ void* result;
+ // Give oversized allocations their own chunk instead of wasting space
+ // due to fragmentation at the end of a normal chunk.
+ if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
+ return allocImplOversize(n);
+ }
+ if (MOZ_LIKELY(!chunks_.empty() &&
+ (result = chunks_.last()->tryAlloc(n)))) {
+ return result;
+ }
+ return allocImplColdPath(n);
+ }
+
+ // Check for space in unused chunks or allocate a new unused chunk.
+ [[nodiscard]] bool ensureUnusedApproximateColdPath(size_t n, size_t total);
+
+ public:
+ explicit LifoAlloc(size_t defaultChunkSize)
+ : peakSize_(0)
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ ,
+ fallibleScope_(true)
+#endif
+ {
+ reset(defaultChunkSize);
+ }
+
+ // Set the threshold to allocate data in its own chunk outside the space for
+ // small allocations.
+ void disableOversize() { oversizeThreshold_ = SIZE_MAX; }
+ void setOversizeThreshold(size_t oversizeThreshold) {
+ MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
+ oversizeThreshold_ = oversizeThreshold;
+ }
+
+ // Steal allocated chunks from |other|.
+ void steal(LifoAlloc* other);
+
+ // Append all chunks from |other|. They are removed from |other|.
+ void transferFrom(LifoAlloc* other);
+
+ // Append unused chunks from |other|. They are removed from |other|.
+ void transferUnusedFrom(LifoAlloc* other);
+
+ ~LifoAlloc() { freeAll(); }
+
+ size_t defaultChunkSize() const { return defaultChunkSize_; }
+
+ // Frees all held memory.
+ void freeAll();
+
+ static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
+ void freeAllIfHugeAndUnused() {
+ if (markCount == 0 && curSize_ > HUGE_ALLOCATION) {
+ freeAll();
+ }
+ }
+
+ MOZ_ALWAYS_INLINE
+ void* alloc(size_t n) {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ // Only simulate OOMs when we are not using the LifoAlloc as an
+ // infallible allocator.
+ if (fallibleScope_) {
+ JS_OOM_POSSIBLY_FAIL();
+ }
+#endif
+ return allocImpl(n);
+ }
+
+ // Allocates |n| bytes if we can guarantee that we will have
+ // |needed| unused bytes remaining. Returns nullptr if we can't.
+ // This is useful for maintaining our ballast invariants while
+ // attempting fallible allocations.
+ MOZ_ALWAYS_INLINE
+ void* allocEnsureUnused(size_t n, size_t needed) {
+ JS_OOM_POSSIBLY_FAIL();
+ MOZ_ASSERT(fallibleScope_);
+
+ Mark m = mark();
+ void* result = allocImpl(n);
+ if (!ensureUnusedApproximate(needed)) {
+ release(m);
+ return nullptr;
+ }
+ cancelMark(m);
+ return result;
+ }
+
+ template <typename T, typename... Args>
+ MOZ_ALWAYS_INLINE T* newWithSize(size_t n, Args&&... args) {
+ MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
+ static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
+ "LifoAlloc must provide enough alignment to store T");
+ void* ptr = alloc(n);
+ if (!ptr) {
+ return nullptr;
+ }
+
+ return new (ptr) T(std::forward<Args>(args)...);
+ }
+
+ MOZ_ALWAYS_INLINE
+ void* allocInfallible(size_t n) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (void* result = allocImpl(n)) {
+ return result;
+ }
+ oomUnsafe.crash("LifoAlloc::allocInfallible");
+ return nullptr;
+ }
+
+ // Ensures that enough space exists to satisfy N bytes worth of
+ // allocation requests, not necessarily contiguous. Note that this does
+ // not guarantee a successful single allocation of N bytes.
+ [[nodiscard]] MOZ_ALWAYS_INLINE bool ensureUnusedApproximate(size_t n) {
+ AutoFallibleScope fallibleAllocator(this);
+ size_t total = 0;
+ if (!chunks_.empty()) {
+ total += chunks_.last()->unused();
+ if (total >= n) {
+ return true;
+ }
+ }
+
+ return ensureUnusedApproximateColdPath(n, total);
+ }
+
+ MOZ_ALWAYS_INLINE
+ void setAsInfallibleByDefault() {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ fallibleScope_ = false;
+#endif
+ }
+
+ class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ LifoAlloc* lifoAlloc_;
+ bool prevFallibleScope_;
+
+ public:
+ explicit AutoFallibleScope(LifoAlloc* lifoAlloc) {
+ lifoAlloc_ = lifoAlloc;
+ prevFallibleScope_ = lifoAlloc->fallibleScope_;
+ lifoAlloc->fallibleScope_ = true;
+ }
+
+ ~AutoFallibleScope() { lifoAlloc_->fallibleScope_ = prevFallibleScope_; }
+#else
+ public:
+ explicit AutoFallibleScope(LifoAlloc*) {}
+#endif
+ };
+
+ template <typename T>
+ T* newArray(size_t count) {
+ static_assert(std::is_trivial_v<T>,
+ "T must be trivially constructible so that constructors need "
+ "not be called");
+ static_assert(std::is_trivially_destructible_v<T>,
+ "T must be trivially destructible so destructors don't need "
+ "to be called when the LifoAlloc is freed");
+ return newArrayUninitialized<T>(count);
+ }
+
+ // Create an array with uninitialized elements of type |T|.
+ // The caller is responsible for initialization.
+ template <typename T>
+ T* newArrayUninitialized(size_t count) {
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
+ return nullptr;
+ }
+ return static_cast<T*>(alloc(bytes));
+ }
+
+ class Mark {
+ friend class LifoAlloc;
+ detail::BumpChunk::Mark chunk;
+ detail::BumpChunk::Mark oversize;
+ };
+
+ // Note: MOZ_NEVER_INLINE is a work around for a Clang 9 (PGO) miscompilation.
+ // See bug 1583907.
+ MOZ_NEVER_INLINE Mark mark();
+
+ void release(Mark mark);
+
+ private:
+ void cancelMark(Mark mark) { markCount--; }
+
+ public:
+ void releaseAll() {
+ MOZ_ASSERT(!markCount);
+
+ // When releasing all chunks, we can no longer determine which chunks were
+ // transferred and which were not, so simply clear the heuristic to zero
+ // right away.
+ smallAllocsSize_ = 0;
+
+ for (detail::BumpChunk& bc : chunks_) {
+ bc.release();
+ }
+ unused_.appendAll(std::move(chunks_));
+
+ // On release, we free any oversize allocations instead of keeping them
+ // in unused chunks.
+ while (!oversize_.empty()) {
+ UniqueBumpChunk bc = oversize_.popFirst();
+ decrementCurSize(bc->computedSizeOfIncludingThis());
+ }
+ }
+
+ // Protect the content of the LifoAlloc chunks.
+#ifdef LIFO_CHUNK_PROTECT
+ void setReadOnly();
+ void setReadWrite();
+#else
+ void setReadOnly() const {}
+ void setReadWrite() const {}
+#endif
+
+ // Get the total "used" (occupied bytes) count for the arena chunks.
+ size_t used() const {
+ size_t accum = 0;
+ for (const detail::BumpChunk& chunk : chunks_) {
+ accum += chunk.used();
+ }
+ return accum;
+ }
+
+ // Return true if the LifoAlloc does not currently contain any allocations.
+ bool isEmpty() const {
+ bool empty = chunks_.empty() ||
+ (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
+ MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
+ return empty && oversize_.empty();
+ }
+
+ // Return the number of bytes remaining to allocate in the current chunk.
+ // e.g. How many bytes we can allocate before needing a new block.
+ size_t availableInCurrentChunk() const {
+ if (chunks_.empty()) {
+ return 0;
+ }
+ return chunks_.last()->unused();
+ }
+
+ // Get the total size of the arena chunks (including unused space).
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t n = 0;
+ for (const detail::BumpChunk& chunk : chunks_) {
+ n += chunk.sizeOfIncludingThis(mallocSizeOf);
+ }
+ for (const detail::BumpChunk& chunk : oversize_) {
+ n += chunk.sizeOfIncludingThis(mallocSizeOf);
+ }
+ for (const detail::BumpChunk& chunk : unused_) {
+ n += chunk.sizeOfIncludingThis(mallocSizeOf);
+ }
+ return n;
+ }
+
+ // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ // Get the current size of the arena chunks (including unused space and
+ // bookkeeping space).
+ size_t computedSizeOfExcludingThis() const { return curSize_; }
+
+ // Get the peak size of the arena chunks (including unused space and
+ // bookkeeping space).
+ size_t peakSizeOfExcludingThis() const { return peakSize_; }
+
+ // Doesn't perform construction; useful for lazily-initialized POD types.
+ template <typename T>
+ MOZ_ALWAYS_INLINE T* pod_malloc() {
+ return static_cast<T*>(alloc(sizeof(T)));
+ }
+
+ JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
+ JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)
+
+#ifdef DEBUG
+ bool contains(const void* ptr) const {
+ for (const detail::BumpChunk& chunk : chunks_) {
+ if (chunk.contains(ptr)) {
+ return true;
+ }
+ }
+ for (const detail::BumpChunk& chunk : oversize_) {
+ if (chunk.contains(ptr)) {
+ return true;
+ }
+ }
+ return false;
+ }
+#endif
+
+ // Iterate over the data allocated in a LifoAlloc, and interpret it.
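+  //
+  // Illustrative sketch only (not part of the original comment): the reads
+  // must mirror the sizes of the original allocations, and |use| is a
+  // hypothetical consumer.
+  //
+  //   LifoAlloc::Enum e(alloc);
+  //   while (!e.empty()) {
+  //     uint32_t* v = e.read<uint32_t>();
+  //     use(*v);
+  //   }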
+ class Enum {
+ friend class LifoAlloc;
+ friend class detail::BumpChunk;
+
+ // Iterator over the list of bump chunks.
+ BumpChunkList::Iterator chunkIt_;
+ BumpChunkList::Iterator chunkEnd_;
+ // Read head (must be within chunk_).
+ uint8_t* head_;
+
+ // If there is not enough room in the remaining block for |size|,
+ // advance to the next block and update the position.
+ uint8_t* seekBaseAndAdvanceBy(size_t size) {
+ MOZ_ASSERT(!empty());
+
+ uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_);
+ if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) {
+ ++chunkIt_;
+ aligned = chunkIt_->begin();
+ // The current code assumes that if we have a chunk, then we
+ // have allocated something it in.
+ MOZ_ASSERT(!chunkIt_->empty());
+ }
+ head_ = detail::BumpChunk::nextAllocEnd(aligned, size);
+ MOZ_ASSERT(head_ <= chunkIt_->end());
+ return aligned;
+ }
+
+ public:
+ explicit Enum(LifoAlloc& alloc)
+ : chunkIt_(alloc.chunks_.begin()),
+ chunkEnd_(alloc.chunks_.end()),
+ head_(nullptr) {
+ MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
+ if (chunkIt_ != chunkEnd_) {
+ head_ = chunkIt_->begin();
+ }
+ }
+
+ // Return true if there are no more bytes to enumerate.
+ bool empty() {
+ return chunkIt_ == chunkEnd_ ||
+ (chunkIt_->next() == chunkEnd_.get() && head_ >= chunkIt_->end());
+ }
+
+ // Move the read position forward by the size of one T.
+ template <typename T>
+ T* read(size_t size = sizeof(T)) {
+ return reinterpret_cast<T*>(read(size));
+ }
+
+ // Return a pointer to the item at the current position. This returns a
+ // pointer to the inline storage, not a copy, and moves the read-head by
+ // the requested |size|.
+ void* read(size_t size) { return seekBaseAndAdvanceBy(size); }
+ };
+};
+
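+// RAII helper that marks a LifoAlloc on construction and releases back to
+// that mark on destruction. A minimal usage sketch (|lifo| and |doWork| are
+// hypothetical, not defined in this header):
+//
+//   {
+//     LifoAllocScope scope(&lifo);
+//     void* tmp = scope.alloc().alloc(256);
+//     if (tmp) {
+//       doWork(tmp);
+//     }
+//   }  // Everything allocated inside the scope is released here.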
+class MOZ_NON_TEMPORARY_CLASS LifoAllocScope {
+ LifoAlloc* lifoAlloc;
+ LifoAlloc::Mark mark;
+ LifoAlloc::AutoFallibleScope fallibleScope;
+
+ public:
+ explicit LifoAllocScope(LifoAlloc* lifoAlloc)
+ : lifoAlloc(lifoAlloc),
+ mark(lifoAlloc->mark()),
+ fallibleScope(lifoAlloc) {}
+
+ ~LifoAllocScope() {
+ lifoAlloc->release(mark);
+
+ /*
+ * The parser can allocate enormous amounts of memory for large functions.
+ * Eagerly free the memory now (which otherwise won't be freed until the
+ * next GC) to avoid unnecessary OOMs.
+ */
+ lifoAlloc->freeAllIfHugeAndUnused();
+ }
+
+ LifoAlloc& alloc() { return *lifoAlloc; }
+};
+
+enum Fallibility { Fallible, Infallible };
+
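+// LifoAllocPolicy adapts a LifoAlloc to the allocation-policy interface used
+// by containers such as js::Vector. A hedged sketch (|lifo| is a hypothetical
+// LifoAlloc; the exact container spelling may differ):
+//
+//   Vector<uint32_t, 0, LifoAllocPolicy<Fallible>> vec(lifo);
+//   if (!vec.append(42)) {
+//     return false;  // allocation failed
+//   }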
+template <Fallibility fb>
+class LifoAllocPolicy {
+ LifoAlloc& alloc_;
+
+ public:
+ MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc) : alloc_(alloc) {}
+ template <typename T>
+ T* maybe_pod_malloc(size_t numElems) {
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
+ return nullptr;
+ }
+ void* p =
+ fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
+ return static_cast<T*>(p);
+ }
+ template <typename T>
+ T* maybe_pod_calloc(size_t numElems) {
+ T* p = maybe_pod_malloc<T>(numElems);
+ if (MOZ_UNLIKELY(!p)) {
+ return nullptr;
+ }
+ memset(p, 0, numElems * sizeof(T));
+ return p;
+ }
+ template <typename T>
+ T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ T* n = maybe_pod_malloc<T>(newSize);
+ if (MOZ_UNLIKELY(!n)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
+ memcpy(n, p, std::min(oldSize * sizeof(T), newSize * sizeof(T)));
+ return n;
+ }
+ template <typename T>
+ T* pod_malloc(size_t numElems) {
+ return maybe_pod_malloc<T>(numElems);
+ }
+ template <typename T>
+ T* pod_calloc(size_t numElems) {
+ return maybe_pod_calloc<T>(numElems);
+ }
+ template <typename T>
+ T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ return maybe_pod_realloc<T>(p, oldSize, newSize);
+ }
+ template <typename T>
+ void free_(T* p, size_t numElems) {}
+ void reportAllocOverflow() const {}
+ [[nodiscard]] bool checkSimulatedOOM() const {
+ return fb == Infallible || !js::oom::ShouldFailWithOOM();
+ }
+};
+
+} // namespace js
+
+#endif /* ds_LifoAlloc_h */
diff --git a/js/src/ds/Nestable.h b/js/src/ds/Nestable.h
new file mode 100644
index 0000000000..c27f7da534
--- /dev/null
+++ b/js/src/ds/Nestable.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_Nestable_h
+#define ds_Nestable_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace js {
+
+// A base class for nestable structures.
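+//
+// Illustrative sketch of the intended pattern (|Scope| is hypothetical):
+//
+//   class Scope : public Nestable<Scope> {
+//    public:
+//     explicit Scope(Scope** stack) : Nestable<Scope>(stack) {}
+//   };
+//
+//   Scope* topScope = nullptr;
+//   {
+//     Scope outer(&topScope);    // topScope == &outer
+//     {
+//       Scope inner(&topScope);  // topScope == &inner, enclosing is &outer
+//     }                          // topScope == &outer again
+//   }                            // topScope == nullptr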
+template <typename Concrete>
+class MOZ_STACK_CLASS Nestable {
+ Concrete** stack_;
+ Concrete* enclosing_;
+
+ protected:
+ explicit Nestable(Concrete** stack) : stack_(stack), enclosing_(*stack) {
+ *stack_ = static_cast<Concrete*>(this);
+ }
+
+  // These methods are protected. Some derived classes, such as ParseContext,
+ // do not expose the ability to walk the stack.
+ Concrete* enclosing() const { return enclosing_; }
+
+ template <typename Predicate /* (Concrete*) -> bool */>
+ static Concrete* findNearest(Concrete* it, Predicate predicate) {
+ while (it && !predicate(it)) {
+ it = it->enclosing();
+ }
+ return it;
+ }
+
+ template <typename T>
+ static T* findNearest(Concrete* it) {
+ while (it && !it->template is<T>()) {
+ it = it->enclosing();
+ }
+ return it ? &it->template as<T>() : nullptr;
+ }
+
+ template <typename T, typename Predicate /* (T*) -> bool */>
+ static T* findNearest(Concrete* it, Predicate predicate) {
+ while (it && (!it->template is<T>() || !predicate(&it->template as<T>()))) {
+ it = it->enclosing();
+ }
+ return it ? &it->template as<T>() : nullptr;
+ }
+
+ public:
+ ~Nestable() {
+ MOZ_ASSERT(*stack_ == static_cast<Concrete*>(this));
+ *stack_ = enclosing_;
+ }
+};
+
+} // namespace js
+
+#endif /* ds_Nestable_h */
diff --git a/js/src/ds/OrderedHashTable.h b/js/src/ds/OrderedHashTable.h
new file mode 100644
index 0000000000..3ef366abc3
--- /dev/null
+++ b/js/src/ds/OrderedHashTable.h
@@ -0,0 +1,1100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_OrderedHashTable_h
+#define ds_OrderedHashTable_h
+
+/*
+ * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
+ * They are like js::HashMap and js::HashSet except that:
+ *
+ * - Iterating over an Ordered hash table visits the entries in the order in
+ * which they were inserted. This means that unlike a HashMap, the behavior
+ * of an OrderedHashMap is deterministic (as long as the HashPolicy methods
+ * are effect-free and consistent); the hashing is a pure performance
+ * optimization.
+ *
+ * - Range objects over Ordered tables remain valid even when entries are
+ * added or removed or the table is resized. (However in the case of
+ * removing entries, note the warning on class Range below.)
+ *
+ * - The API is a little different, so it's not a drop-in replacement.
+ * In particular, the hash policy is a little different.
+ * Also, the Ordered templates lack the Ptr and AddPtr types.
+ *
+ * Hash policies
+ *
+ * See the comment about "Hash policy" in HashTable.h for general features that
+ * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
+ * differ in that the hash() method takes an extra argument:
+ * static js::HashNumber hash(Lookup, const HashCodeScrambler&);
+ * They must additionally provide a distinguished "empty" key value and the
+ * following static member functions:
+ * bool isEmpty(const Key&);
+ * void makeEmpty(Key*);
+ */
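+
+/*
+ * Illustrative sketch of a policy satisfying the above (names and the exact
+ * use of the scrambler are hypothetical, not prescribed by this header):
+ *
+ *   struct PointerOrderedHashPolicy {
+ *     using Lookup = void*;
+ *     static js::HashNumber hash(const Lookup& l,
+ *                                const mozilla::HashCodeScrambler& hcs) {
+ *       return hcs.scramble(js::HashNumber(uintptr_t(l)));
+ *     }
+ *     static bool match(void* const& k, const Lookup& l) { return k == l; }
+ *     static bool isEmpty(void* const& k) { return k == nullptr; }
+ *     static void makeEmpty(void** k) { *k = nullptr; }
+ *   };
+ */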
+
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/TemplateLib.h"
+
+#include <tuple>
+#include <utility>
+
+#include "gc/Barrier.h"
+#include "js/GCPolicyAPI.h"
+#include "js/HashTable.h"
+
+class JSTracer;
+
+namespace js {
+
+namespace detail {
+
+/*
+ * detail::OrderedHashTable is the underlying data structure used to implement
+ * both OrderedHashMap and OrderedHashSet. Programs should use one of those two
+ * templates rather than OrderedHashTable.
+ */
+template <class T, class Ops, class AllocPolicy>
+class OrderedHashTable {
+ public:
+ using Key = typename Ops::KeyType;
+ using Lookup = typename Ops::Lookup;
+
+ struct Data {
+ T element;
+ Data* chain;
+
+ Data(const T& e, Data* c) : element(e), chain(c) {}
+ Data(T&& e, Data* c) : element(std::move(e)), chain(c) {}
+ };
+
+ class Range;
+ friend class Range;
+
+ private:
+ Data** hashTable; // hash table (has hashBuckets() elements)
+ Data* data; // data vector, an array of Data objects
+ // data[0:dataLength] are constructed
+ uint32_t dataLength; // number of constructed elements in data
+ uint32_t dataCapacity; // size of data, in elements
+ uint32_t liveCount; // dataLength less empty (removed) entries
+ uint32_t hashShift; // multiplicative hash shift
+ Range* ranges; // list of all live Ranges on this table in malloc memory
+ Range*
+ nurseryRanges; // list of all live Ranges on this table in the GC nursery
+ AllocPolicy alloc;
+ mozilla::HashCodeScrambler hcs; // don't reveal pointer hash codes
+
+ // TODO: This should be templated on a functor type and receive lambda
+ // arguments but this causes problems for the hazard analysis builds. See
+ // bug 1398213.
+ template <void (*f)(Range* range, uint32_t arg)>
+ void forEachRange(uint32_t arg = 0) {
+ Range* next;
+ for (Range* r = ranges; r; r = next) {
+ next = r->next;
+ f(r, arg);
+ }
+ for (Range* r = nurseryRanges; r; r = next) {
+ next = r->next;
+ f(r, arg);
+ }
+ }
+
+ public:
+ OrderedHashTable(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
+ : hashTable(nullptr),
+ data(nullptr),
+ dataLength(0),
+ dataCapacity(0),
+ liveCount(0),
+ hashShift(0),
+ ranges(nullptr),
+ nurseryRanges(nullptr),
+ alloc(std::move(ap)),
+ hcs(hcs) {}
+
+ [[nodiscard]] bool init() {
+ MOZ_ASSERT(!hashTable, "init must be called at most once");
+
+ uint32_t buckets = initialBuckets();
+ Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
+ if (!tableAlloc) {
+ return false;
+ }
+ for (uint32_t i = 0; i < buckets; i++) {
+ tableAlloc[i] = nullptr;
+ }
+
+ uint32_t capacity = uint32_t(buckets * fillFactor());
+ Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
+ if (!dataAlloc) {
+ alloc.free_(tableAlloc, buckets);
+ return false;
+ }
+
+ // clear() requires that members are assigned only after all allocation
+ // has succeeded, and that this->ranges is left untouched.
+ hashTable = tableAlloc;
+ data = dataAlloc;
+ dataLength = 0;
+ dataCapacity = capacity;
+ liveCount = 0;
+ hashShift = js::kHashNumberBits - initialBucketsLog2();
+ MOZ_ASSERT(hashBuckets() == buckets);
+ return true;
+ }
+
+ ~OrderedHashTable() {
+ forEachRange<Range::onTableDestroyed>();
+ if (hashTable) {
+ // |hashBuckets()| isn't valid when |hashTable| hasn't been created.
+ alloc.free_(hashTable, hashBuckets());
+ }
+ freeData(data, dataLength, dataCapacity);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t size = 0;
+ if (hashTable) {
+ size += mallocSizeOf(hashTable);
+ }
+ if (data) {
+ size += mallocSizeOf(data);
+ }
+ return size;
+ }
+
+ /* Return the number of elements in the table. */
+ uint32_t count() const { return liveCount; }
+
+ /* True if any element matches l. */
+ bool has(const Lookup& l) const { return lookup(l) != nullptr; }
+
+ /* Return a pointer to the element, if any, that matches l, or nullptr. */
+ T* get(const Lookup& l) {
+ Data* e = lookup(l, prepareHash(l));
+ return e ? &e->element : nullptr;
+ }
+
+ /* Return a pointer to the element, if any, that matches l, or nullptr. */
+ const T* get(const Lookup& l) const {
+ return const_cast<OrderedHashTable*>(this)->get(l);
+ }
+
+ /*
+ * If the table already contains an entry that matches |element|,
+ * replace that entry with |element|. Otherwise add a new entry.
+ *
+ * On success, return true, whether there was already a matching element or
+ * not. On allocation failure, return false. If this returns false, it
+ * means the element was not added to the table.
+ */
+ template <typename ElementInput>
+ [[nodiscard]] bool put(ElementInput&& element) {
+ HashNumber h = prepareHash(Ops::getKey(element));
+ if (Data* e = lookup(Ops::getKey(element), h)) {
+ e->element = std::forward<ElementInput>(element);
+ return true;
+ }
+
+ if (dataLength == dataCapacity && !rehashOnFull()) {
+ return false;
+ }
+
+ auto [entry, chain] = addEntry(h);
+ new (entry) Data(std::forward<ElementInput>(element), chain);
+ return true;
+ }
+
+ /*
+   * If the table contains an entry that matches |element|, return a pointer
+   * to it; otherwise add a new entry.
+ */
+ template <typename ElementInput>
+ [[nodiscard]] T* getOrAdd(ElementInput&& element) {
+ HashNumber h = prepareHash(Ops::getKey(element));
+ if (Data* e = lookup(Ops::getKey(element), h)) {
+ return &e->element;
+ }
+
+ if (dataLength == dataCapacity && !rehashOnFull()) {
+ return nullptr;
+ }
+
+ auto [entry, chain] = addEntry(h);
+ new (entry) Data(std::forward<ElementInput>(element), chain);
+ return &entry->element;
+ }
+
+ /*
+ * If the table contains an element matching l, remove it and set *foundp
+ * to true. Otherwise set *foundp to false.
+ *
+ * Return true on success, false if we tried to shrink the table and hit an
+ * allocation failure. Even if this returns false, *foundp is set correctly
+ * and the matching element was removed. Shrinking is an optimization and
+ * it's OK for it to fail.
+ */
+ bool remove(const Lookup& l, bool* foundp) {
+ // Note: This could be optimized so that removing the last entry,
+ // data[dataLength - 1], decrements dataLength. LIFO use cases would
+ // benefit.
+
+ // If a matching entry exists, empty it.
+ Data* e = lookup(l, prepareHash(l));
+ if (e == nullptr) {
+ *foundp = false;
+ return true;
+ }
+
+ *foundp = true;
+ liveCount--;
+ Ops::makeEmpty(&e->element);
+
+ // Update active Ranges.
+ uint32_t pos = e - data;
+ forEachRange<&Range::onRemove>(pos);
+
+ // If many entries have been removed, try to shrink the table.
+ if (hashBuckets() > initialBuckets() &&
+ liveCount < dataLength * minDataFill()) {
+ if (!rehash(hashShift + 1)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /*
+ * Remove all entries.
+ *
+ * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
+ * in the old state.
+ *
+ * The effect on live Ranges is the same as removing all entries; in
+ * particular, those Ranges are still live and will see any entries added
+ * after a successful clear().
+ */
+ [[nodiscard]] bool clear() {
+ if (dataLength != 0) {
+ Data** oldHashTable = hashTable;
+ Data* oldData = data;
+ uint32_t oldHashBuckets = hashBuckets();
+ uint32_t oldDataLength = dataLength;
+ uint32_t oldDataCapacity = dataCapacity;
+
+ hashTable = nullptr;
+ if (!init()) {
+ // init() only mutates members on success; see comment above.
+ hashTable = oldHashTable;
+ return false;
+ }
+
+ alloc.free_(oldHashTable, oldHashBuckets);
+ freeData(oldData, oldDataLength, oldDataCapacity);
+ forEachRange<&Range::onClear>();
+ }
+
+ MOZ_ASSERT(hashTable);
+ MOZ_ASSERT(data);
+ MOZ_ASSERT(dataLength == 0);
+ MOZ_ASSERT(liveCount == 0);
+ return true;
+ }
+
+ /*
+ * Ranges are used to iterate over OrderedHashTables.
+ *
+ * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
+ * Then you can walk all the key-value pairs like this:
+ *
+ * for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
+ * Map::Entry& pair = r.front();
+ * ... do something with pair ...
+ * }
+ *
+ * Ranges remain valid for the lifetime of the OrderedHashTable, even if
+ * entries are added or removed or the table is resized. Don't do anything
+ * to a Range, except destroy it, after the OrderedHashTable has been
+ * destroyed. (We support destroying the two objects in either order to
+ * humor the GC, bless its nondeterministic heart.)
+ *
+ * Warning: The behavior when the current front() entry is removed from the
+ * table is subtly different from js::HashTable<>::Enum::removeFront()!
+ * HashTable::Enum doesn't skip any entries when you removeFront() and then
+ * popFront(). OrderedHashTable::Range does! (This is useful for using a
+ * Range to implement JS Map.prototype.iterator.)
+ *
+ * The workaround is to call popFront() as soon as possible,
+ * before there's any possibility of modifying the table:
+ *
+ * for (Map::Range r = map.all(); !r.empty(); ) {
+ * Key key = r.front().key; // this won't modify map
+ * Value val = r.front().value; // this won't modify map
+ * r.popFront();
+ * // ...do things that might modify map...
+ * }
+ */
+ class Range {
+ friend class OrderedHashTable;
+
+ // Cannot be a reference since we need to be able to do
+ // |offsetof(Range, ht)|.
+ OrderedHashTable* ht;
+
+ /* The index of front() within ht->data. */
+ uint32_t i;
+
+ /*
+ * The number of nonempty entries in ht->data to the left of front().
+ * This is used when the table is resized or compacted.
+ */
+ uint32_t count;
+
+ /*
+ * Links in the doubly-linked list of active Ranges on ht.
+ *
+ * prevp points to the previous Range's .next field;
+ * or to ht->ranges if this is the first Range in the list.
+ * next points to the next Range;
+ * or nullptr if this is the last Range in the list.
+ *
+ * Invariant: *prevp == this.
+ */
+ Range** prevp;
+ Range* next;
+
+ /*
+ * Create a Range over all the entries in ht.
+ * (This is private on purpose. End users must use ht->all().)
+ */
+ Range(OrderedHashTable* ht, Range** listp)
+ : ht(ht), i(0), count(0), prevp(listp), next(*listp) {
+ *prevp = this;
+ if (next) {
+ next->prevp = &next;
+ }
+ seek();
+ }
+
+ public:
+ Range(const Range& other)
+ : ht(other.ht),
+ i(other.i),
+ count(other.count),
+ prevp(&ht->ranges),
+ next(ht->ranges) {
+ *prevp = this;
+ if (next) {
+ next->prevp = &next;
+ }
+ }
+
+ ~Range() {
+ *prevp = next;
+ if (next) {
+ next->prevp = prevp;
+ }
+ }
+
+ protected:
+ // Prohibit copy assignment.
+ Range& operator=(const Range& other) = delete;
+
+ void seek() {
+ while (i < ht->dataLength &&
+ Ops::isEmpty(Ops::getKey(ht->data[i].element))) {
+ i++;
+ }
+ }
+
+ /*
+ * The hash table calls this when an entry is removed.
+ * j is the index of the removed entry.
+ */
+ void onRemove(uint32_t j) {
+ MOZ_ASSERT(valid());
+ if (j < i) {
+ count--;
+ }
+ if (j == i) {
+ seek();
+ }
+ }
+
+ /*
+ * The hash table calls this when the table is resized or compacted.
+ * Since |count| is the number of nonempty entries to the left of
+ * front(), discarding the empty entries will not affect count, and it
+ * will make i and count equal.
+ */
+ void onCompact() {
+ MOZ_ASSERT(valid());
+ i = count;
+ }
+
+ /* The hash table calls this when cleared. */
+ void onClear() {
+ MOZ_ASSERT(valid());
+ i = count = 0;
+ }
+
+ bool valid() const { return next != this; }
+
+ void onTableDestroyed() {
+ MOZ_ASSERT(valid());
+ prevp = &next;
+ next = this;
+ }
+
+ public:
+ bool empty() const {
+ MOZ_ASSERT(valid());
+ return i >= ht->dataLength;
+ }
+
+ /*
+ * Return the first element in the range. This must not be called if
+ * this->empty().
+ *
+ * Warning: Removing an entry from the table also removes it from any
+ * live Ranges, and a Range can become empty that way, rendering
+ * front() invalid. If in doubt, check empty() before calling front().
+ */
+ const T& front() const {
+ MOZ_ASSERT(valid());
+ MOZ_ASSERT(!empty());
+ return ht->data[i].element;
+ }
+
+ /*
+ * Remove the first element from this range.
+ * This must not be called if this->empty().
+ *
+ * Warning: Removing an entry from the table also removes it from any
+ * live Ranges, and a Range can become empty that way, rendering
+ * popFront() invalid. If in doubt, check empty() before calling
+ * popFront().
+ */
+ void popFront() {
+ MOZ_ASSERT(valid());
+ MOZ_ASSERT(!empty());
+ MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
+ count++;
+ i++;
+ seek();
+ }
+
+ static size_t offsetOfHashTable() { return offsetof(Range, ht); }
+ static size_t offsetOfI() { return offsetof(Range, i); }
+ static size_t offsetOfCount() { return offsetof(Range, count); }
+ static size_t offsetOfPrevP() { return offsetof(Range, prevp); }
+ static size_t offsetOfNext() { return offsetof(Range, next); }
+
+ static void onTableDestroyed(Range* range, uint32_t arg) {
+ range->onTableDestroyed();
+ }
+ static void onRemove(Range* range, uint32_t arg) { range->onRemove(arg); }
+ static void onClear(Range* range, uint32_t arg) { range->onClear(); }
+ static void onCompact(Range* range, uint32_t arg) { range->onCompact(); }
+ };
+
+ class MutableRange : public Range {
+ MutableRange(OrderedHashTable* ht, Range** listp) : Range(ht, listp) {}
+ friend class OrderedHashTable;
+
+ public:
+ T& front() {
+ MOZ_ASSERT(this->valid());
+ MOZ_ASSERT(!this->empty());
+ return this->ht->data[this->i].element;
+ }
+
+ void rekeyFront(const Key& k) {
+ MOZ_ASSERT(this->valid());
+ this->ht->rekey(&this->ht->data[this->i], k);
+ }
+ };
+
+ Range all() const {
+ // Range operates on a mutable table but its interface does not permit
+ // modification of the contents of the table.
+ auto* self = const_cast<OrderedHashTable*>(this);
+ return Range(self, &self->ranges);
+ }
+ MutableRange mutableAll() { return MutableRange(this, &ranges); }
+
+ void trace(JSTracer* trc) {
+ for (uint32_t i = 0; i < dataLength; i++) {
+ if (!Ops::isEmpty(Ops::getKey(data[i].element))) {
+ Ops::trace(trc, this, i, data[i].element);
+ }
+ }
+ }
+
+ // For use by the implementation of Ops::trace.
+ template <typename Key>
+ void traceKey(JSTracer* trc, uint32_t index, Key& key) {
+ MOZ_ASSERT(index < dataLength);
+ using MutableKey = std::remove_const_t<Key>;
+ using UnbarrieredKey = typename RemoveBarrier<MutableKey>::Type;
+ UnbarrieredKey newKey = key;
+ JS::GCPolicy<UnbarrieredKey>::trace(trc, &newKey, "OrderedHashMap key");
+ if (newKey != key) {
+ rekey(&data[index], newKey);
+ }
+ }
+ template <typename Value>
+ void traceValue(JSTracer* trc, Value& value) {
+ JS::GCPolicy<Value>::trace(trc, &value, "OrderedHashMap value");
+ }
+
+ /*
+ * Allocate a new Range, possibly in nursery memory. The buffer must be
+ * large enough to hold a Range object.
+ *
+ * All nursery-allocated ranges can be freed in one go by calling
+ * destroyNurseryRanges().
+ */
+ Range* createRange(void* buffer, bool inNursery) const {
+ auto* self = const_cast<OrderedHashTable*>(this);
+ Range** listp = inNursery ? &self->nurseryRanges : &self->ranges;
+ new (buffer) Range(self, listp);
+ return static_cast<Range*>(buffer);
+ }
+
+ void destroyNurseryRanges() { nurseryRanges = nullptr; }
+
+ /*
+   * Rekey the entry whose key matches |current| to |newKey|, replacing its
+   * stored element with |element|.
+ *
+ * This calls Ops::hash on both the current key and the new key.
+ * Ops::hash on the current key must return the same hash code as
+ * when the entry was added to the table.
+ */
+ void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
+ if (current == newKey) {
+ return;
+ }
+
+ HashNumber currentHash = prepareHash(current);
+ Data* entry = lookup(current, currentHash);
+ MOZ_ASSERT(entry);
+
+ HashNumber oldHash = currentHash >> hashShift;
+ HashNumber newHash = prepareHash(newKey) >> hashShift;
+
+ entry->element = element;
+
+ // Remove this entry from its old hash chain. (If this crashes
+ // reading nullptr, it would mean we did not find this entry on
+ // the hash chain where we expected it. That probably means the
+ // key's hash code changed since it was inserted, breaking the
+ // hash code invariant.)
+ Data** ep = &hashTable[oldHash];
+ while (*ep != entry) {
+ ep = &(*ep)->chain;
+ }
+ *ep = entry->chain;
+
+ // Add it to the new hash chain. We could just insert it at the
+ // beginning of the chain. Instead, we do a bit of work to
+ // preserve the invariant that hash chains always go in reverse
+ // insertion order (descending memory order). No code currently
+ // depends on this invariant, so it's fine to kill it if
+ // needed.
+ ep = &hashTable[newHash];
+ while (*ep && *ep > entry) {
+ ep = &(*ep)->chain;
+ }
+ entry->chain = *ep;
+ *ep = entry;
+ }
+
+ static size_t offsetOfDataLength() {
+ return offsetof(OrderedHashTable, dataLength);
+ }
+ static size_t offsetOfData() { return offsetof(OrderedHashTable, data); }
+ static constexpr size_t offsetOfHashTable() {
+ return offsetof(OrderedHashTable, hashTable);
+ }
+ static constexpr size_t offsetOfHashShift() {
+ return offsetof(OrderedHashTable, hashShift);
+ }
+ static constexpr size_t offsetOfLiveCount() {
+ return offsetof(OrderedHashTable, liveCount);
+ }
+ static constexpr size_t offsetOfDataElement() {
+ static_assert(offsetof(Data, element) == 0,
+ "RangeFront and RangePopFront depend on offsetof(Data, "
+ "element) being 0");
+ return offsetof(Data, element);
+ }
+ static constexpr size_t offsetOfDataChain() { return offsetof(Data, chain); }
+ static constexpr size_t sizeofData() { return sizeof(Data); }
+
+ static constexpr size_t offsetOfHcsK0() {
+ return offsetof(OrderedHashTable, hcs) +
+ mozilla::HashCodeScrambler::offsetOfMK0();
+ }
+ static constexpr size_t offsetOfHcsK1() {
+ return offsetof(OrderedHashTable, hcs) +
+ mozilla::HashCodeScrambler::offsetOfMK1();
+ }
+
+ private:
+ /* Logarithm base 2 of the number of buckets in the hash table initially. */
+ static uint32_t initialBucketsLog2() { return 1; }
+ static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }
+
+ /*
+ * The maximum load factor (mean number of entries per bucket).
+ * It is an invariant that
+ * dataCapacity == floor(hashBuckets() * fillFactor()).
+ *
+ * The fill factor should be between 2 and 4, and it should be chosen so that
+ * the fill factor times sizeof(Data) is close to but <= a power of 2.
+ * This fixed fill factor was chosen to make the size of the data
+ * array, in bytes, close to a power of two when sizeof(T) is 16.
+ */
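+  //
+  // Worked example (illustrative, assuming 64-bit pointers): if
+  // sizeof(T) == 16 then sizeof(Data) == 16 + 8 == 24 (element plus chain
+  // pointer), and 24 * 8/3 == 64, a power of two.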
+ static constexpr double fillFactor() { return 8.0 / 3.0; }
+
+ /*
+ * The minimum permitted value of (liveCount / dataLength).
+ * If that ratio drops below this value, we shrink the table.
+ */
+ static double minDataFill() { return 0.25; }
+
+ public:
+ HashNumber prepareHash(const Lookup& l) const {
+ return mozilla::ScrambleHashCode(Ops::hash(l, hcs));
+ }
+
+ private:
+ /* The size of hashTable, in elements. Always a power of two. */
+ uint32_t hashBuckets() const {
+ return 1 << (js::kHashNumberBits - hashShift);
+ }
+
+ static void destroyData(Data* data, uint32_t length) {
+ for (Data* p = data + length; p != data;) {
+ (--p)->~Data();
+ }
+ }
+
+ void freeData(Data* data, uint32_t length, uint32_t capacity) {
+ destroyData(data, length);
+ alloc.free_(data, capacity);
+ }
+
+ Data* lookup(const Lookup& l, HashNumber h) {
+ for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
+ if (Ops::match(Ops::getKey(e->element), l)) {
+ return e;
+ }
+ }
+ return nullptr;
+ }
+
+ const Data* lookup(const Lookup& l) const {
+ return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
+ }
+
+ std::tuple<Data*, Data*> addEntry(HashNumber hash) {
+ MOZ_ASSERT(dataLength < dataCapacity);
+ hash >>= hashShift;
+ liveCount++;
+ Data* entry = &data[dataLength++];
+ Data* chain = hashTable[hash];
+ hashTable[hash] = entry;
+ return std::make_tuple(entry, chain);
+ }
+
+ /* This is called after rehashing the table. */
+ void compacted() {
+ // If we had any empty entries, compacting may have moved live entries
+ // to the left within |data|. Notify all live Ranges of the change.
+ forEachRange<&Range::onCompact>();
+ }
+
+ /* Compact the entries in |data| and rehash them. */
+ void rehashInPlace() {
+ for (uint32_t i = 0, N = hashBuckets(); i < N; i++) {
+ hashTable[i] = nullptr;
+ }
+ Data* wp = data;
+ Data* end = data + dataLength;
+ for (Data* rp = data; rp != end; rp++) {
+ if (!Ops::isEmpty(Ops::getKey(rp->element))) {
+ HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
+ if (rp != wp) {
+ wp->element = std::move(rp->element);
+ }
+ wp->chain = hashTable[h];
+ hashTable[h] = wp;
+ wp++;
+ }
+ }
+ MOZ_ASSERT(wp == data + liveCount);
+
+ while (wp != end) {
+ (--end)->~Data();
+ }
+ dataLength = liveCount;
+ compacted();
+ }
+
+ [[nodiscard]] bool rehashOnFull() {
+ MOZ_ASSERT(dataLength == dataCapacity);
+
+    // If more than 1/4 of the entries have been deleted, simply rehash in
+    // place to free up some space. Otherwise, grow the table.
+ uint32_t newHashShift =
+ liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
+ return rehash(newHashShift);
+ }
+
+ /*
+ * Grow, shrink, or compact both |hashTable| and |data|.
+ *
+ * On success, this returns true, dataLength == liveCount, and there are no
+ * empty elements in data[0:dataLength]. On allocation failure, this
+ * leaves everything as it was and returns false.
+ */
+ [[nodiscard]] bool rehash(uint32_t newHashShift) {
+ // If the size of the table is not changing, rehash in place to avoid
+ // allocating memory.
+ if (newHashShift == hashShift) {
+ rehashInPlace();
+ return true;
+ }
+
+ // Ensure the new capacity fits into INT32_MAX.
+ constexpr size_t maxCapacityLog2 =
+ mozilla::tl::FloorLog2<size_t(INT32_MAX / fillFactor())>::value;
+ static_assert(maxCapacityLog2 < kHashNumberBits);
+
+ // Fail if |(js::kHashNumberBits - newHashShift) > maxCapacityLog2|.
+ //
+ // Reorder |kHashNumberBits| so both constants are on the right-hand side.
+ if (MOZ_UNLIKELY(newHashShift < (js::kHashNumberBits - maxCapacityLog2))) {
+ alloc.reportAllocOverflow();
+ return false;
+ }
+
+ size_t newHashBuckets = size_t(1) << (js::kHashNumberBits - newHashShift);
+ Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
+ if (!newHashTable) {
+ return false;
+ }
+ for (uint32_t i = 0; i < newHashBuckets; i++) {
+ newHashTable[i] = nullptr;
+ }
+
+ uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
+ Data* newData = alloc.template pod_malloc<Data>(newCapacity);
+ if (!newData) {
+ alloc.free_(newHashTable, newHashBuckets);
+ return false;
+ }
+
+ Data* wp = newData;
+ Data* end = data + dataLength;
+ for (Data* p = data; p != end; p++) {
+ if (!Ops::isEmpty(Ops::getKey(p->element))) {
+ HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
+ new (wp) Data(std::move(p->element), newHashTable[h]);
+ newHashTable[h] = wp;
+ wp++;
+ }
+ }
+ MOZ_ASSERT(wp == newData + liveCount);
+
+ alloc.free_(hashTable, hashBuckets());
+ freeData(data, dataLength, dataCapacity);
+
+ hashTable = newHashTable;
+ data = newData;
+ dataLength = liveCount;
+ dataCapacity = newCapacity;
+ hashShift = newHashShift;
+ MOZ_ASSERT(hashBuckets() == newHashBuckets);
+
+ compacted();
+ return true;
+ }
+
+  // Change the key of the given entry.
+ //
+ // This calls Ops::hash on both the current key and the new key. Ops::hash on
+ // the current key must return the same hash code as when the entry was added
+ // to the table.
+ void rekey(Data* entry, const Key& k) {
+ HashNumber oldHash = prepareHash(Ops::getKey(entry->element)) >> hashShift;
+ HashNumber newHash = prepareHash(k) >> hashShift;
+ Ops::setKey(entry->element, k);
+ if (newHash != oldHash) {
+ // Remove this entry from its old hash chain. (If this crashes reading
+ // nullptr, it would mean we did not find this entry on the hash chain
+ // where we expected it. That probably means the key's hash code changed
+ // since it was inserted, breaking the hash code invariant.)
+ Data** ep = &hashTable[oldHash];
+ while (*ep != entry) {
+ ep = &(*ep)->chain;
+ }
+ *ep = entry->chain;
+
+ // Add it to the new hash chain. We could just insert it at the beginning
+ // of the chain. Instead, we do a bit of work to preserve the invariant
+ // that hash chains always go in reverse insertion order (descending
+ // memory order). No code currently depends on this invariant, so it's
+ // fine to kill it if needed.
+ ep = &hashTable[newHash];
+ while (*ep && *ep > entry) {
+ ep = &(*ep)->chain;
+ }
+ entry->chain = *ep;
+ *ep = entry;
+ }
+ }
+
+ // Not copyable.
+ OrderedHashTable& operator=(const OrderedHashTable&) = delete;
+ OrderedHashTable(const OrderedHashTable&) = delete;
+};
+
+} // namespace detail
+
+template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
+class OrderedHashMap {
+ public:
+ class Entry {
+ template <class, class, class>
+ friend class detail::OrderedHashTable;
+ void operator=(const Entry& rhs) {
+ const_cast<Key&>(key) = rhs.key;
+ value = rhs.value;
+ }
+
+ void operator=(Entry&& rhs) {
+ MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+ const_cast<Key&>(key) = std::move(rhs.key);
+ value = std::move(rhs.value);
+ }
+
+ public:
+ Entry() : key(), value() {}
+ explicit Entry(const Key& k) : key(k), value() {}
+ template <typename V>
+ Entry(const Key& k, V&& v) : key(k), value(std::forward<V>(v)) {}
+ Entry(Entry&& rhs) : key(std::move(rhs.key)), value(std::move(rhs.value)) {}
+
+ const Key key;
+ Value value;
+
+ static size_t offsetOfKey() { return offsetof(Entry, key); }
+ static size_t offsetOfValue() { return offsetof(Entry, value); }
+ };
+
+ private:
+ struct MapOps;
+ using Impl = detail::OrderedHashTable<Entry, MapOps, AllocPolicy>;
+
+ struct MapOps : OrderedHashPolicy {
+ using KeyType = Key;
+ static void makeEmpty(Entry* e) {
+ OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));
+
+ // Clear the value. Destroying it is another possibility, but that
+ // would complicate class Entry considerably.
+ e->value = Value();
+ }
+ static const Key& getKey(const Entry& e) { return e.key; }
+ static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
+ static void trace(JSTracer* trc, Impl* table, uint32_t index,
+ Entry& entry) {
+ table->traceKey(trc, index, entry.key);
+ table->traceValue(trc, entry.value);
+ }
+ };
+
+ Impl impl;
+
+ public:
+ using Lookup = typename Impl::Lookup;
+ using Range = typename Impl::Range;
+ using MutableRange = typename Impl::MutableRange;
+
+ OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
+ : impl(std::move(ap), hcs) {}
+ [[nodiscard]] bool init() { return impl.init(); }
+ uint32_t count() const { return impl.count(); }
+ bool has(const Lookup& key) const { return impl.has(key); }
+ Range all() const { return impl.all(); }
+ MutableRange mutableAll() { return impl.mutableAll(); }
+ const Entry* get(const Lookup& key) const { return impl.get(key); }
+ Entry* get(const Lookup& key) { return impl.get(key); }
+ bool remove(const Lookup& key, bool* foundp) {
+ return impl.remove(key, foundp);
+ }
+ [[nodiscard]] bool clear() { return impl.clear(); }
+
+ template <typename K, typename V>
+ [[nodiscard]] bool put(K&& key, V&& value) {
+ return impl.put(Entry(std::forward<K>(key), std::forward<V>(value)));
+ }
+
+ template <typename K>
+ [[nodiscard]] Entry* getOrAdd(K&& key) {
+ return impl.getOrAdd(Entry(std::forward<K>(key)));
+ }
+
+ HashNumber hash(const Lookup& key) const { return impl.prepareHash(key); }
+
+ template <typename GetNewKey>
+ void rekeyOneEntry(const Lookup& current, const GetNewKey& getNewKey) {
+ const Entry* e = get(current);
+ if (!e) {
+ return;
+ }
+ Key newKey = getNewKey(current);
+ return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
+ }
+
+ Range* createRange(void* buffer, bool inNursery) const {
+ return impl.createRange(buffer, inNursery);
+ }
+
+ void destroyNurseryRanges() { impl.destroyNurseryRanges(); }
+
+ void trace(JSTracer* trc) { impl.trace(trc); }
+
+ static size_t offsetOfEntryKey() { return Entry::offsetOfKey(); }
+ static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
+ static size_t offsetOfImplData() { return Impl::offsetOfData(); }
+ static constexpr size_t offsetOfImplHashTable() {
+ return Impl::offsetOfHashTable();
+ }
+ static constexpr size_t offsetOfImplHashShift() {
+ return Impl::offsetOfHashShift();
+ }
+ static constexpr size_t offsetOfImplLiveCount() {
+ return Impl::offsetOfLiveCount();
+ }
+ static constexpr size_t offsetOfImplDataElement() {
+ return Impl::offsetOfDataElement();
+ }
+ static constexpr size_t offsetOfImplDataChain() {
+ return Impl::offsetOfDataChain();
+ }
+ static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }
+
+ static constexpr size_t offsetOfImplHcsK0() { return Impl::offsetOfHcsK0(); }
+ static constexpr size_t offsetOfImplHcsK1() { return Impl::offsetOfHcsK1(); }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+template <class T, class OrderedHashPolicy, class AllocPolicy>
+class OrderedHashSet {
+ private:
+ struct SetOps;
+ using Impl = detail::OrderedHashTable<T, SetOps, AllocPolicy>;
+
+ struct SetOps : OrderedHashPolicy {
+ using KeyType = const T;
+ static const T& getKey(const T& v) { return v; }
+ static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
+ static void trace(JSTracer* trc, Impl* table, uint32_t index, T& entry) {
+ table->traceKey(trc, index, entry);
+ }
+ };
+
+ Impl impl;
+
+ public:
+ using Lookup = typename Impl::Lookup;
+ using Range = typename Impl::Range;
+ using MutableRange = typename Impl::MutableRange;
+
+ explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs)
+ : impl(std::move(ap), hcs) {}
+ [[nodiscard]] bool init() { return impl.init(); }
+ uint32_t count() const { return impl.count(); }
+ bool has(const Lookup& value) const { return impl.has(value); }
+ Range all() const { return impl.all(); }
+ MutableRange mutableAll() { return impl.mutableAll(); }
+ template <typename Input>
+ [[nodiscard]] bool put(Input&& value) {
+ return impl.put(std::forward<Input>(value));
+ }
+ bool remove(const Lookup& value, bool* foundp) {
+ return impl.remove(value, foundp);
+ }
+ [[nodiscard]] bool clear() { return impl.clear(); }
+
+ HashNumber hash(const Lookup& value) const { return impl.prepareHash(value); }
+
+ template <typename GetNewKey>
+ void rekeyOneEntry(const Lookup& current, const GetNewKey& getNewKey) {
+ if (!has(current)) {
+ return;
+ }
+ T newKey = getNewKey(current);
+ return impl.rekeyOneEntry(current, newKey, newKey);
+ }
+
+ Range* createRange(void* buffer, bool inNursery) const {
+ return impl.createRange(buffer, inNursery);
+ }
+
+ void destroyNurseryRanges() { impl.destroyNurseryRanges(); }
+
+ void trace(JSTracer* trc) { impl.trace(trc); }
+
+ static size_t offsetOfEntryKey() { return 0; }
+ static size_t offsetOfImplDataLength() { return Impl::offsetOfDataLength(); }
+ static size_t offsetOfImplData() { return Impl::offsetOfData(); }
+ static constexpr size_t offsetOfImplHashTable() {
+ return Impl::offsetOfHashTable();
+ }
+ static constexpr size_t offsetOfImplHashShift() {
+ return Impl::offsetOfHashShift();
+ }
+ static constexpr size_t offsetOfImplLiveCount() {
+ return Impl::offsetOfLiveCount();
+ }
+ static constexpr size_t offsetOfImplDataElement() {
+ return Impl::offsetOfDataElement();
+ }
+ static constexpr size_t offsetOfImplDataChain() {
+ return Impl::offsetOfDataChain();
+ }
+ static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }
+
+ static constexpr size_t offsetOfImplHcsK0() { return Impl::offsetOfHcsK0(); }
+ static constexpr size_t offsetOfImplHcsK1() { return Impl::offsetOfHcsK1(); }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+} // namespace js
+
+#endif /* ds_OrderedHashTable_h */
diff --git a/js/src/ds/PointerAndUint7.h b/js/src/ds/PointerAndUint7.h
new file mode 100644
index 0000000000..6313b0df48
--- /dev/null
+++ b/js/src/ds/PointerAndUint7.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_PointerAndUint7_h
+#define gc_PointerAndUint7_h
+
+#include "mozilla/Assertions.h"
+
+#include <stdint.h>
+
+namespace js {
+
+// A class that can store an address and a 7-bit unsigned integer in 64 bits,
+// even on a 64-bit target.
+//
+// On 64-bit targets, it assumes that all supported target architectures
+// contain at most 57 significant bits in their addresses, and that the valid
+// address space is split evenly between addresses increasing from 0--(64)--0
+// and addresses decreasing from 1--(64)--1.
+//
+// The 57-significant-bit constraint comes from Intel's 5-level paging scheme
+// as introduced in the Ice Lake processor line, circa late 2019; see
+// https://en.wikipedia.org/wiki/Intel_5-level_paging. Prior to that, Intel
+// required only 48 significant bits. AArch64 requires 52 significant bits,
+// as of the ARMv8.2 LVA (Large Virtual Addressing) extension, and so is less
+// constraining than Intel.
+//
+// In any case, NaN-boxing of pointers in JS::Value gives us a pretty hard
+// requirement that we can store pointers in 47 bits. So that constraint will
+// break before the 57-bit constraint here breaks. See SMDOC in
+// js/public/Value.h.
+//
+// On 32-bit targets, both components are stored unmodified in the upper and
+// lower 32-bit chunks of the value, and there are no constraints on the
+// component values.
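+//
+// Illustrative usage sketch (|obj| and |kTag| are hypothetical):
+//
+//   PointerAndUint7 packed(obj, kTag);  // kTag must fit in 7 bits
+//   void* p = packed.pointer();         // recovers |obj|
+//   uint32_t tag = packed.uint7();      // recovers |kTag|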
+
+#ifdef JS_64BIT
+
+// The implementation for 64-bit targets.
+class PointerAndUint7 final {
+ // The representation is: the lowest 57 bits of the pointer are stored in
+ // the top 57 bits of val_, and the Uint7 is stored in the bottom 7 bits.
+ // Hence recovering the pointer is 7-bit signed shift right of val_, and
+ // recovering the UInt7 is an AND with 127. In both cases, that's a single
+ // machine instruction.
+ uint64_t val_;
+
+ static const uint8_t SHIFT_PTR = 7;
+ static const uint64_t MASK_UINT7 = (uint64_t(1) << SHIFT_PTR) - 1;
+
+ static inline bool isRepresentablePtr(void* ptr) {
+ // We require that the top 7 bits (bits 63:57) are the same as bit 56.
+ // That will be the case iff, when we signedly shift `ptr` right by 56
+ // bits, the value is all zeroes or all ones.
+ int64_t s = int64_t(ptr);
+ // s should be bbbb'bbbb'X--(56)--X, for b = 0 or 1, and X can be anything
+ s >>= (64 - SHIFT_PTR - 1); // 56
+ // s should be 0--(64)--0 or 1--(64)--1
+ uint64_t u = uint64_t(s);
+ // Note, this addition can overflow, intentionally.
+ u += 1;
+ // u should be 0--(64)--0 or 0--(63)--01
+ return u <= uint64_t(1);
+ }
+ static inline bool isRepresentableUint7(uint32_t uint7) {
+ return uint7 <= MASK_UINT7;
+ }
+
+ public:
+ inline PointerAndUint7() : val_(0) {}
+ inline PointerAndUint7(void* ptr, uint32_t uint7)
+ : val_((uint64_t(ptr) << SHIFT_PTR) | (uint64_t(uint7 & MASK_UINT7))) {
+ MOZ_ASSERT(isRepresentablePtr(ptr));
+ MOZ_ASSERT(isRepresentableUint7(uint7));
+ }
+ inline void* pointer() const { return (void*)(int64_t(val_) >> SHIFT_PTR); }
+ inline uint32_t uint7() const { return uint32_t(val_ & MASK_UINT7); }
+};
+
+static_assert(sizeof(void*) == 8);
+// "int64_t really is signed"
+static_assert(((int64_t(1) << 63) >> 63) == int64_t(0xFFFFFFFFFFFFFFFFULL));
+
+#else
+
+// The implementation for 32-bit targets.
+class PointerAndUint7 final {
+ // The representation places the pointer in the upper 32 bits of val_ and
+ // the Uint7 in the lower 32 bits. This is represented using a single
+ // 64-bit field in the hope of increasing the chance that the class will be
+ // passed around in a register-pair rather than through memory.
+ uint64_t val_;
+
+ static const uint8_t SHIFT_PTR = 32;
+ static const uint64_t MASK_UINT7 = (uint64_t(1) << 7) - 1;
+
+ static inline bool isRepresentableUint7(uint32_t uint7) {
+ return uint7 <= MASK_UINT7;
+ }
+
+ public:
+ inline PointerAndUint7() : val_(0) {}
+ inline PointerAndUint7(void* ptr, uint32_t uint7)
+ : val_((uint64_t(uint32_t(ptr)) << SHIFT_PTR) |
+ (uint64_t(uint7) & MASK_UINT7)) {
+ MOZ_ASSERT(isRepresentableUint7(uint7));
+ }
+ inline void* pointer() const { return (void*)(int32_t(val_ >> SHIFT_PTR)); }
+ inline uint32_t uint7() const { return uint32_t(val_ & MASK_UINT7); }
+};
+
+static_assert(sizeof(void*) == 4);
+
+#endif // JS_64BIT
+
+// We require this for both 32- and 64-bit targets.
+static_assert(sizeof(PointerAndUint7) == 8);
+
+} // namespace js
+
+#endif // gc_PointerAndUint7_h
diff --git a/js/src/ds/PriorityQueue.h b/js/src/ds/PriorityQueue.h
new file mode 100644
index 0000000000..9ed4788a5b
--- /dev/null
+++ b/js/src/ds/PriorityQueue.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_PriorityQueue_h
+#define ds_PriorityQueue_h
+
+#include "js/Vector.h"
+
+namespace js {
+
+/*
+ * Class which represents a heap-based priority queue using a vector.
+ * Inserting elements and removing the highest priority one are both O(log n).
+ *
+ * Template parameters are the same as for Vector, with the addition that P
+ * must have a static priority(const T&) method which returns higher numbers
+ * for higher priority elements.
+ */
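+/*
+ * Illustrative sketch of a priority policy and its use (all names here are
+ * hypothetical):
+ *
+ *   struct Task {
+ *     int urgency;
+ *   };
+ *   struct TaskPriority {
+ *     static int priority(const Task& t) { return t.urgency; }
+ *   };
+ *
+ *   PriorityQueue<Task, TaskPriority, 0, SystemAllocPolicy> queue;
+ *   if (!queue.insert(Task{3}) || !queue.insert(Task{7})) {
+ *     return false;
+ *   }
+ *   Task highest = queue.removeHighest();  // the urgency-7 task comes first
+ */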
+template <class T, class P, size_t MinInlineCapacity = 0,
+ class AllocPolicy = TempAllocPolicy>
+class PriorityQueue {
+ Vector<T, MinInlineCapacity, AllocPolicy> heap;
+
+ PriorityQueue(const PriorityQueue&) = delete;
+ PriorityQueue& operator=(const PriorityQueue&) = delete;
+
+ public:
+ explicit PriorityQueue(AllocPolicy ap = AllocPolicy())
+ : heap(std::move(ap)) {}
+
+ [[nodiscard]] bool reserve(size_t capacity) { return heap.reserve(capacity); }
+
+ size_t length() const { return heap.length(); }
+
+ bool empty() const { return heap.empty(); }
+
+ T removeHighest() {
+ T highest = heap[0];
+ T last = heap.popCopy();
+ if (!heap.empty()) {
+ heap[0] = last;
+ siftDown(0);
+ }
+ return highest;
+ }
+
+ [[nodiscard]] bool insert(const T& v) {
+ if (!heap.append(v)) {
+ return false;
+ }
+ siftUp(heap.length() - 1);
+ return true;
+ }
+
+ void infallibleInsert(const T& v) {
+ heap.infallibleAppend(v);
+ siftUp(heap.length() - 1);
+ }
+
+ private:
+ /*
+ * Elements of the vector encode a binary tree:
+ *
+ * 0
+ * 1 2
+ * 3 4 5 6
+ *
+ * The children of element N are (2N + 1) and (2N + 2).
+ * The parent of element N is (N - 1) / 2.
+ *
+ * Each element has higher priority than its children.
+ */
+
+ void siftDown(size_t n) {
+ while (true) {
+ size_t left = n * 2 + 1;
+ size_t right = n * 2 + 2;
+
+ if (left < heap.length()) {
+ if (right < heap.length()) {
+ if (P::priority(heap[n]) < P::priority(heap[right]) &&
+ P::priority(heap[left]) < P::priority(heap[right])) {
+ swap(n, right);
+ n = right;
+ continue;
+ }
+ }
+
+ if (P::priority(heap[n]) < P::priority(heap[left])) {
+ swap(n, left);
+ n = left;
+ continue;
+ }
+ }
+
+ break;
+ }
+ }
+
+ void siftUp(size_t n) {
+ while (n > 0) {
+ size_t parent = (n - 1) / 2;
+
+ if (P::priority(heap[parent]) > P::priority(heap[n])) {
+ break;
+ }
+
+ swap(n, parent);
+ n = parent;
+ }
+ }
+
+ void swap(size_t a, size_t b) {
+ T tmp = heap[a];
+ heap[a] = heap[b];
+ heap[b] = tmp;
+ }
+};
+
+} /* namespace js */
+
+#endif /* ds_PriorityQueue_h */
diff --git a/js/src/ds/SinglyLinkedList.h b/js/src/ds/SinglyLinkedList.h
new file mode 100644
index 0000000000..dab9261d1c
--- /dev/null
+++ b/js/src/ds/SinglyLinkedList.h
@@ -0,0 +1,160 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_SinglyLinkedList_h
+#define ds_SinglyLinkedList_h
+
+#include "mozilla/Assertions.h"
+
+#include <utility>
+
+namespace js {
+
+/*
+ * Circular singly linked list that requires only one word per element and for
+ * the list itself.
+ *
+ * Requires that T has a |T::next| field for the link pointer.
+ */
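+/*
+ * Illustrative sketch (|Node| and |use| are hypothetical):
+ *
+ *   struct Node {
+ *     Node* next = nullptr;
+ *     int value = 0;
+ *   };
+ *
+ *   SinglyLinkedList<Node> list;
+ *   Node a, b;
+ *   list.pushBack(&a);
+ *   list.pushBack(&b);
+ *   for (auto iter = list.iter(); !iter.done(); iter.next()) {
+ *     use(iter->value);  // visits a, then b
+ *   }
+ *   while (!list.isEmpty()) {
+ *     list.popFront();   // the destructor asserts the list is empty
+ *   }
+ */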
+template <typename T>
+class SinglyLinkedList {
+ T* last_ = nullptr;
+
+ public:
+ SinglyLinkedList() {
+ static_assert(std::is_same_v<decltype(T::next), T*>,
+ "SinglyLinkedList requires T has a next field of type T*");
+ MOZ_ASSERT(isEmpty());
+ }
+
+ SinglyLinkedList(T* first, T* last) : last_(last) {
+ MOZ_ASSERT(!last_->next);
+ last_->next = first;
+ }
+
+ // It's not possible for elements to be present in more than one list, so copy
+ // operations are not provided.
+ SinglyLinkedList(const SinglyLinkedList& other) = delete;
+ SinglyLinkedList& operator=(const SinglyLinkedList& other) = delete;
+
+ SinglyLinkedList(SinglyLinkedList&& other) {
+ std::swap(last_, other.last_);
+ MOZ_ASSERT(other.isEmpty());
+ }
+ SinglyLinkedList& operator=(SinglyLinkedList&& other) {
+ MOZ_ASSERT(isEmpty());
+ return *new (this) SinglyLinkedList(std::move(other));
+ }
+
+ ~SinglyLinkedList() { MOZ_ASSERT(isEmpty()); }
+
+ bool isEmpty() const { return !last_; }
+
+ // These return nullptr if the list is empty.
+ T* first() const {
+ if (isEmpty()) {
+ return nullptr;
+ }
+ T* element = last_->next;
+ MOZ_ASSERT(element);
+ return element;
+ }
+ T* last() const { return last_; }
+
+ T* popFront() {
+ MOZ_ASSERT(!isEmpty());
+
+ T* element = last_->next;
+
+ if (element == last_) {
+ last_ = nullptr;
+ } else {
+ last_->next = element->next;
+ }
+
+ element->next = nullptr;
+ return element;
+ }
+ // popBack cannot be implemented in constant time for a singly linked list.
+
+ void pushFront(T* element) {
+ MOZ_ASSERT(!element->next);
+ if (isEmpty()) {
+ element->next = element;
+ last_ = element;
+ return;
+ }
+
+ element->next = last_->next;
+ last_->next = element;
+ }
+ void pushBack(T* element) {
+ pushFront(element);
+ moveFrontToBack();
+ }
+ void moveFrontToBack() {
+ MOZ_ASSERT(!isEmpty());
+ last_ = last_->next;
+ }
+
+ void append(SinglyLinkedList&& other) {
+ if (other.isEmpty()) {
+ return;
+ }
+
+ if (isEmpty()) {
+ new (this) SinglyLinkedList(std::move(other));
+ return;
+ }
+
+ T* firstElement = first();
+ last()->next = other.first();
+ other.last()->next = firstElement;
+ last_ = other.last();
+ other.last_ = nullptr;
+ }
+
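+  // Iterates the elements of the list in order, from first to last.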
+ class Iterator {
+ T* i = nullptr;
+ T* last = nullptr;
+
+ public:
+ explicit Iterator(const SinglyLinkedList& list)
+ : i(list.first()), last(list.last()) {}
+ bool done() const { return !i; }
+ void next() {
+ MOZ_ASSERT(!done());
+ i = i == last ? nullptr : i->next;
+ }
+ T* get() const {
+ MOZ_ASSERT(!done());
+ return i;
+ }
+
+ T& operator*() const { return *get(); }
+ T* operator->() const { return get(); }
+ };
+
+ Iterator iter() const { return Iterator(*this); }
+
+ // Extracts a non-circular linked list and clears this object.
+ T* release() {
+ if (isEmpty()) {
+ return nullptr;
+ }
+
+ T* list = first();
+ MOZ_ASSERT(last_->next);
+ last_->next = nullptr;
+ last_ = nullptr;
+ return list;
+ }
+};
+
+} /* namespace js */
+
+#endif /* ds_SinglyLinkedList_h */
diff --git a/js/src/ds/Sort.h b/js/src/ds/Sort.h
new file mode 100644
index 0000000000..c4a92973d6
--- /dev/null
+++ b/js/src/ds/Sort.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_Sort_h
+#define ds_Sort_h
+
+#include "jstypes.h"
+
+namespace js {
+
+namespace detail {
+
+template <typename T>
+MOZ_ALWAYS_INLINE void CopyNonEmptyArray(T* dst, const T* src, size_t nelems) {
+ MOZ_ASSERT(nelems != 0);
+ const T* end = src + nelems;
+ do {
+ *dst++ = *src++;
+ } while (src != end);
+}
+
+/*
+ * Helper function for MergeSort: merge two adjacent sorted runs of lengths
+ * |run1| and |run2| from |src| into |dst|.
+ */
+template <typename T, typename Comparator>
+MOZ_ALWAYS_INLINE bool MergeArrayRuns(T* dst, const T* src, size_t run1,
+ size_t run2, Comparator c) {
+ MOZ_ASSERT(run1 >= 1);
+ MOZ_ASSERT(run2 >= 1);
+
+ /* Copy runs already in sorted order. */
+ const T* b = src + run1;
+ bool lessOrEqual;
+ if (!c(b[-1], b[0], &lessOrEqual)) {
+ return false;
+ }
+
+ if (!lessOrEqual) {
+ /* Runs are not already sorted, merge them. */
+ for (const T* a = src;;) {
+ if (!c(*a, *b, &lessOrEqual)) {
+ return false;
+ }
+ if (lessOrEqual) {
+ *dst++ = *a++;
+ if (!--run1) {
+ src = b;
+ break;
+ }
+ } else {
+ *dst++ = *b++;
+ if (!--run2) {
+ src = a;
+ break;
+ }
+ }
+ }
+ }
+ CopyNonEmptyArray(dst, src, run1 + run2);
+ return true;
+}
+
+} /* namespace detail */
+
+/*
+ * Sort the array using the merge sort algorithm. |scratch| must point to
+ * temporary storage that can hold nelems elements.
+ *
+ * The comparator must provide the () operator with the following signature:
+ *
+ *   bool operator()(const T& a, const T& b, bool* lessOrEqualp);
+ *
+ * It should return true on success and set *lessOrEqualp to the result of
+ * the a <= b comparison. If it returns false, the sort terminates immediately
+ * and MergeSort returns false; in that case the contents of the array and the
+ * scratch storage are arbitrary.
+ *
+ * Note: The merge sort algorithm is a stable sort, preserving relative ordering
+ * of entries that compare equal. This makes it a useful substitute for
+ * |std::stable_sort|, which can't be used in SpiderMonkey because it internally
+ * allocates memory without using SpiderMonkey's allocator.
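+ *
+ * A minimal usage sketch (the element type and comparator here are purely
+ * illustrative):
+ *
+ *   struct LessOrEqual {
+ *     bool operator()(int a, int b, bool* lessOrEqualp) {
+ *       *lessOrEqualp = a <= b;
+ *       return true;  // This comparator is infallible.
+ *     }
+ *   };
+ *
+ *   int values[] = {3, 1, 2};
+ *   int scratch[3];
+ *   if (!MergeSort(values, 3, scratch, LessOrEqual())) {
+ *     return false;  // Only reachable if the comparator reports failure.
+ *   }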
+ */
+template <typename T, typename Comparator>
+[[nodiscard]] bool MergeSort(T* array, size_t nelems, T* scratch,
+ Comparator c) {
+ const size_t INS_SORT_LIMIT = 3;
+
+ if (nelems <= 1) {
+ return true;
+ }
+
+ /*
+ * Apply insertion sort to small chunks to reduce the number of merge
+ * passes needed.
+ */
+ for (size_t lo = 0; lo < nelems; lo += INS_SORT_LIMIT) {
+ size_t hi = lo + INS_SORT_LIMIT;
+ if (hi >= nelems) {
+ hi = nelems;
+ }
+ for (size_t i = lo + 1; i != hi; i++) {
+ for (size_t j = i;;) {
+ bool lessOrEqual;
+ if (!c(array[j - 1], array[j], &lessOrEqual)) {
+ return false;
+ }
+ if (lessOrEqual) {
+ break;
+ }
+ T tmp = array[j - 1];
+ array[j - 1] = array[j];
+ array[j] = tmp;
+ if (--j == lo) {
+ break;
+ }
+ }
+ }
+ }
+
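+  /*
+   * Merge sorted runs bottom-up, doubling the run length after each pass and
+   * alternating the roles of |array| and |scratch| as source and destination.
+   */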
+ T* vec1 = array;
+ T* vec2 = scratch;
+ for (size_t run = INS_SORT_LIMIT; run < nelems; run *= 2) {
+ for (size_t lo = 0; lo < nelems; lo += 2 * run) {
+ size_t hi = lo + run;
+ if (hi >= nelems) {
+ detail::CopyNonEmptyArray(vec2 + lo, vec1 + lo, nelems - lo);
+ break;
+ }
+ size_t run2 = (run <= nelems - hi) ? run : nelems - hi;
+ if (!detail::MergeArrayRuns(vec2 + lo, vec1 + lo, run, run2, c)) {
+ return false;
+ }
+ }
+ T* swap = vec1;
+ vec1 = vec2;
+ vec2 = swap;
+ }
+ if (vec1 == scratch) {
+ detail::CopyNonEmptyArray(array, scratch, nelems);
+ }
+ return true;
+}
+
+} /* namespace js */
+
+#endif /* ds_Sort_h */
diff --git a/js/src/ds/TraceableFifo.h b/js/src/ds/TraceableFifo.h
new file mode 100644
index 0000000000..a904610276
--- /dev/null
+++ b/js/src/ds/TraceableFifo.h
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_TraceableFifo_h
+#define js_TraceableFifo_h
+
+#include <utility>
+
+#include "ds/Fifo.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+
+namespace js {
+
+// A TraceableFifo is a Fifo with an additional trace method that knows how to
+// visit all of the items stored in the Fifo. For Fifos that contain GC things,
+// this is usually more convenient than manually iterating and marking the
+// contents.
+//
+// Most types of GC pointers as keys and values can be traced with no extra
+// infrastructure. For structs and non-gc-pointer members, ensure that there is
+// a specialization of GCPolicy<T> with an appropriate trace method available
+// to handle the custom type. Generic helpers can be found in
+// js/public/TracingAPI.h. Generic helpers can be found in
+// js/public/TracingAPI.h.
+//
+// Note that although this Fifo's trace will deal correctly with moved items, it
+// does not itself know when to barrier or trace items. To function properly it
+// must either be used with Rooted, or barriered and traced manually.
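+//
+// A purely illustrative sketch; the names and the Rooted/TempAllocPolicy setup
+// below are assumptions, not part of this patch:
+//
+//   using ObjectFifo = TraceableFifo<JSObject*>;
+//   JS::Rooted<ObjectFifo> fifo(cx, ObjectFifo(cx));
+//   if (!fifo.pushBack(someObject)) {
+//     return false;  // OOM was reported via the alloc policy.
+//   }
+//   JSObject* first = fifo.front();
+//   fifo.popFront();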
+template <typename T, size_t MinInlineCapacity = 0,
+ typename AllocPolicy = TempAllocPolicy>
+class TraceableFifo : public js::Fifo<T, MinInlineCapacity, AllocPolicy> {
+ using Base = js::Fifo<T, MinInlineCapacity, AllocPolicy>;
+
+ public:
+ explicit TraceableFifo(AllocPolicy alloc = AllocPolicy())
+ : Base(std::move(alloc)) {}
+
+ TraceableFifo(TraceableFifo&& rhs) : Base(std::move(rhs)) {}
+ TraceableFifo& operator=(TraceableFifo&& rhs) = default;
+
+ TraceableFifo(const TraceableFifo&) = delete;
+ TraceableFifo& operator=(const TraceableFifo&) = delete;
+
+ void trace(JSTracer* trc) {
+ for (size_t i = 0; i < this->front_.length(); ++i) {
+ JS::GCPolicy<T>::trace(trc, &this->front_[i], "fifo element");
+ }
+ for (size_t i = 0; i < this->rear_.length(); ++i) {
+ JS::GCPolicy<T>::trace(trc, &this->rear_[i], "fifo element");
+ }
+ }
+};
+
+template <typename Wrapper, typename T, size_t Capacity, typename AllocPolicy>
+class WrappedPtrOperations<TraceableFifo<T, Capacity, AllocPolicy>, Wrapper> {
+ using TF = TraceableFifo<T, Capacity, AllocPolicy>;
+ const TF& fifo() const { return static_cast<const Wrapper*>(this)->get(); }
+
+ public:
+ size_t length() const { return fifo().length(); }
+ bool empty() const { return fifo().empty(); }
+ const T& front() const { return fifo().front(); }
+};
+
+template <typename Wrapper, typename T, size_t Capacity, typename AllocPolicy>
+class MutableWrappedPtrOperations<TraceableFifo<T, Capacity, AllocPolicy>,
+ Wrapper>
+ : public WrappedPtrOperations<TraceableFifo<T, Capacity, AllocPolicy>,
+ Wrapper> {
+ using TF = TraceableFifo<T, Capacity, AllocPolicy>;
+ TF& fifo() { return static_cast<Wrapper*>(this)->get(); }
+
+ public:
+ T& front() { return fifo().front(); }
+
+ template <typename U>
+ bool pushBack(U&& u) {
+ return fifo().pushBack(std::forward<U>(u));
+ }
+ template <typename... Args>
+ bool emplaceBack(Args&&... args) {
+    return fifo().emplaceBack(std::forward<Args>(args)...);
+ }
+
+ void popFront() { fifo().popFront(); }
+ void clear() { fifo().clear(); }
+};
+
+} // namespace js
+
+#endif // js_TraceableFifo_h