author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /js/src/jit/JitcodeMap.h
parent     Initial commit. (diff)
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/jit/JitcodeMap.h')
-rw-r--r--  js/src/jit/JitcodeMap.h  1235
1 file changed, 1235 insertions(+), 0 deletions(-)
diff --git a/js/src/jit/JitcodeMap.h b/js/src/jit/JitcodeMap.h
new file mode 100644
index 0000000000..f25bf22299
--- /dev/null
+++ b/js/src/jit/JitcodeMap.h
@@ -0,0 +1,1235 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitcodeMap_h
+#define jit_JitcodeMap_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF, MOZ_CRASH
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t, uint64_t
+
+#include "jit/CompactBuffer.h" // CompactBufferReader, CompactBufferWriter
+#include "jit/shared/Assembler-shared.h" // CodeOffset
+#include "js/AllocPolicy.h" // SystemAllocPolicy
+#include "js/TypeDecls.h" // jsbytecode
+#include "js/Vector.h" // Vector
+#include "vm/BytecodeLocation.h" // BytecodeLocation
+
+class JSScript;
+class JSTracer;
+struct JSRuntime;
+
+namespace JS {
+class Zone;
+} // namespace JS
+
+namespace js {
+
+class GCMarker;
+
+namespace jit {
+
+class InlineScriptTree;
+
+/*
+ * The Ion jitcode map implements tables to allow mapping from addresses in ion
+ * jitcode to the list of (JSScript*, jsbytecode*) pairs that are implicitly
+ * active in the frame at that point in the native code.
+ *
+ * To represent this information efficiently, a multi-level table is used.
+ *
+ * At the top level, a global skip-list of JitcodeGlobalEntry entries
+ * describes the mapping for each individual IonCode script generated by
+ * compilation. The entries are ordered by their nativeStartAddr.
+ *
+ * Every entry in the table is of fixed size, but there are different entry
+ * types, distinguished by the kind field.
+ */
+
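+// As an illustration of how the profiler consumes this table: given the
+// runtime-wide JitcodeGlobalTable and a sampled native address, the entry
+// covering that address yields the inlined (script, pc) stack. The fragment
+// below is a hypothetical sketch (the real sampler-side code lives elsewhere;
+// |table|, |rt| and |sampledAddr| are assumed inputs):
+//
+//   if (JitcodeGlobalEntry* entry = table->lookup(sampledAddr)) {
+//     JitcodeGlobalEntry::BytecodeLocationVector locations;
+//     uint32_t depth;
+//     if (entry->callStackAtAddr(rt, sampledAddr, locations, &depth)) {
+//       // locations[0] is the innermost (script, pc) pair at sampledAddr.
+//     }
+//   }
+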
+class JitcodeGlobalTable;
+class JitcodeIonTable;
+class JitcodeRegionEntry;
+
+class JitcodeGlobalEntry;
+
+struct NativeToBytecode {
+ CodeOffset nativeOffset;
+ InlineScriptTree* tree;
+ jsbytecode* pc;
+};
+
+class JitcodeSkiplistTower {
+ public:
+ static const unsigned MAX_HEIGHT = 32;
+
+ private:
+ uint8_t height_;
+ bool isFree_;
+ JitcodeGlobalEntry* ptrs_[1];
+
+ public:
+ explicit JitcodeSkiplistTower(unsigned height)
+ : height_(height), isFree_(false) {
+ MOZ_ASSERT(height >= 1 && height <= MAX_HEIGHT);
+ clearPtrs();
+ }
+
+ unsigned height() const { return height_; }
+
+  JitcodeGlobalEntry** ptrs(unsigned level) {
+    MOZ_ASSERT(level < height());
+    return &ptrs_[level];
+  }
+
+ JitcodeGlobalEntry* next(unsigned level) const {
+ MOZ_ASSERT(!isFree_);
+ MOZ_ASSERT(level < height());
+ return ptrs_[level];
+ }
+ void setNext(unsigned level, JitcodeGlobalEntry* entry) {
+ MOZ_ASSERT(!isFree_);
+ MOZ_ASSERT(level < height());
+ ptrs_[level] = entry;
+ }
+
+ //
+ // When stored in a free-list, towers use 'ptrs_[0]' to store a
+ // pointer to the next tower. In this context only, 'ptrs_[0]'
+ // may refer to a |JitcodeSkiplistTower*| instead of a
+ // |JitcodeGlobalEntry*|.
+ //
+
+ void addToFreeList(JitcodeSkiplistTower** freeList) {
+ JitcodeSkiplistTower* nextFreeTower = *freeList;
+ MOZ_ASSERT_IF(nextFreeTower,
+ nextFreeTower->isFree_ && nextFreeTower->height() == height_);
+ ptrs_[0] = (JitcodeGlobalEntry*)nextFreeTower;
+ isFree_ = true;
+ *freeList = this;
+ }
+
+ static JitcodeSkiplistTower* PopFromFreeList(
+ JitcodeSkiplistTower** freeList) {
+ if (!*freeList) {
+ return nullptr;
+ }
+
+ JitcodeSkiplistTower* tower = *freeList;
+ MOZ_ASSERT(tower->isFree_);
+ JitcodeSkiplistTower* nextFreeTower =
+ (JitcodeSkiplistTower*)tower->ptrs_[0];
+ tower->clearPtrs();
+ tower->isFree_ = false;
+ *freeList = nextFreeTower;
+ return tower;
+ }
+
+ static size_t CalculateSize(unsigned height) {
+ MOZ_ASSERT(height >= 1);
+ return sizeof(JitcodeSkiplistTower) +
+ (sizeof(JitcodeGlobalEntry*) * (height - 1));
+ }
+
+ private:
+  void clearPtrs() {
+    for (unsigned i = 0; i < height_; i++) {
+      ptrs_[i] = nullptr;
+    }
+  }
+};
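+
+// Towers are variable-height: a tower of height N carries N next-pointers,
+// one per skip-list level, so storage is sized with CalculateSize(N) and the
+// tower is placement-constructed into it. A minimal sketch (illustrative
+// only; the real table allocates towers from its LifoAlloc and free-lists,
+// and |mem| here is a hypothetical buffer):
+//
+//   unsigned height = 3;
+//   void* mem = js_malloc(JitcodeSkiplistTower::CalculateSize(height));
+//   auto* tower = new (mem) JitcodeSkiplistTower(height);
+//   MOZ_ASSERT(tower->height() == 3);  // next(0..2) all start out as nullptr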
+
+class JitcodeGlobalEntry {
+ friend class JitcodeGlobalTable;
+
+ public:
+ enum Kind {
+ INVALID = 0,
+ Ion,
+ Baseline,
+ BaselineInterpreter,
+ Dummy,
+ Query,
+ LIMIT
+ };
+ static_assert(LIMIT <= 8);
+
+ typedef Vector<BytecodeLocation, 0, SystemAllocPolicy> BytecodeLocationVector;
+
+ struct BaseEntry {
+ static const uint64_t kNoSampleInBuffer = UINT64_MAX;
+
+ JitCode* jitcode_;
+ void* nativeStartAddr_;
+ void* nativeEndAddr_;
+ // If this entry is referenced from the profiler buffer, this is the
+ // position where the most recent sample that references it starts.
+ // Otherwise set to kNoSampleInBuffer.
+ uint64_t samplePositionInBuffer_;
+ Kind kind_ : 7;
+
+ void init() {
+ jitcode_ = nullptr;
+ nativeStartAddr_ = nullptr;
+ nativeEndAddr_ = nullptr;
+ samplePositionInBuffer_ = kNoSampleInBuffer;
+ kind_ = INVALID;
+ }
+
+ void init(Kind kind, JitCode* code, void* nativeStartAddr,
+ void* nativeEndAddr) {
+ MOZ_ASSERT_IF(kind != Query, code);
+ MOZ_ASSERT(nativeStartAddr);
+ MOZ_ASSERT(nativeEndAddr);
+ MOZ_ASSERT(kind > INVALID && kind < LIMIT);
+ jitcode_ = code;
+ nativeStartAddr_ = nativeStartAddr;
+ nativeEndAddr_ = nativeEndAddr;
+ samplePositionInBuffer_ = kNoSampleInBuffer;
+ kind_ = kind;
+ }
+
+ void setSamplePositionInBuffer(uint64_t bufferWritePos) {
+ samplePositionInBuffer_ = bufferWritePos;
+ }
+ void setAsExpired() { samplePositionInBuffer_ = kNoSampleInBuffer; }
+ bool isSampled(uint64_t bufferRangeStart) {
+ if (samplePositionInBuffer_ == kNoSampleInBuffer) {
+ return false;
+ }
+ return bufferRangeStart <= samplePositionInBuffer_;
+ }
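+    // Worked example: if the profiler buffer has discarded everything before
+    // bufferRangeStart == 100, an entry whose latest referencing sample was
+    // written at position 150 is still considered sampled, while one last
+    // referenced at position 40 (or never referenced, kNoSampleInBuffer) is
+    // not.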
+
+ Kind kind() const { return kind_; }
+ JitCode* jitcode() const { return jitcode_; }
+ void* nativeStartAddr() const { return nativeStartAddr_; }
+ void* nativeEndAddr() const { return nativeEndAddr_; }
+
+ bool startsBelowPointer(void* ptr) const {
+ return ((uint8_t*)nativeStartAddr()) <= ((uint8_t*)ptr);
+ }
+ bool endsAbovePointer(void* ptr) const {
+ return ((uint8_t*)nativeEndAddr()) > ((uint8_t*)ptr);
+ }
+ bool containsPointer(void* ptr) const {
+ return startsBelowPointer(ptr) && endsAbovePointer(ptr);
+ }
+
+ bool traceJitcode(JSTracer* trc);
+ bool isJitcodeMarkedFromAnyThread(JSRuntime* rt);
+ };
+
+ struct IonEntry : public BaseEntry {
+    // regionTable_ points to the start of the region table within the
+    // packed map for the compile represented by this entry. Since the
+ // region table occurs at the tail of the memory region, this pointer
+ // points somewhere inside the region memory space, and not to the start
+ // of the memory space.
+ JitcodeIonTable* regionTable_;
+
+ struct ScriptNamePair {
+ JSScript* script;
+ char* str;
+ };
+
+ struct SizedScriptList {
+ uint32_t size;
+ ScriptNamePair pairs[1];
+ SizedScriptList(uint32_t sz, JSScript** scrs, char** strs) : size(sz) {
+ for (uint32_t i = 0; i < size; i++) {
+ pairs[i].script = scrs[i];
+ pairs[i].str = strs[i];
+ }
+ }
+
+ static uint32_t AllocSizeFor(uint32_t nscripts) {
+ return sizeof(SizedScriptList) +
+ ((nscripts - 1) * sizeof(ScriptNamePair));
+ }
+ };
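+    // SizedScriptList is allocated with its pairs inline: AllocSizeFor(3) is
+    // sizeof(SizedScriptList) + 2 * sizeof(ScriptNamePair), since one pair is
+    // already embedded in the struct. A hypothetical sketch (the real list is
+    // built by the Ion entry construction code; |scripts| and |names| are
+    // assumed parallel arrays of length 3):
+    //
+    //   void* mem = js_malloc(SizedScriptList::AllocSizeFor(3));
+    //   auto* list = new (mem) SizedScriptList(3, scripts, names);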
+
+ SizedScriptList* scriptList_;
+
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ SizedScriptList* scriptList, JitcodeIonTable* regionTable) {
+ MOZ_ASSERT(scriptList);
+ MOZ_ASSERT(regionTable);
+ BaseEntry::init(Ion, code, nativeStartAddr, nativeEndAddr);
+ regionTable_ = regionTable;
+ scriptList_ = scriptList;
+ }
+
+ SizedScriptList* sizedScriptList() const { return scriptList_; }
+
+ unsigned numScripts() const { return scriptList_->size; }
+
+ JSScript* getScript(unsigned idx) const {
+ MOZ_ASSERT(idx < numScripts());
+ return sizedScriptList()->pairs[idx].script;
+ }
+
+ const char* getStr(unsigned idx) const {
+ MOZ_ASSERT(idx < numScripts());
+ return sizedScriptList()->pairs[idx].str;
+ }
+
+ void destroy();
+
+ JitcodeIonTable* regionTable() const { return regionTable_; }
+
+ int scriptIndex(JSScript* script) const {
+ unsigned count = numScripts();
+ for (unsigned i = 0; i < count; i++) {
+ if (getScript(i) == script) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID(void* ptr) const;
+
+ bool trace(JSTracer* trc);
+ void sweepChildren();
+ bool isMarkedFromAnyThread(JSRuntime* rt);
+ };
+
+ struct BaselineEntry : public BaseEntry {
+ JSScript* script_;
+ const char* str_;
+
+    // Last location that caused Ion to abort compilation, and the reason for
+    // the abort, if any. Only actionable aborts are tracked; internal
+    // errors such as OOMs are not.
+ jsbytecode* ionAbortPc_;
+ const char* ionAbortMessage_;
+
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr,
+ JSScript* script, const char* str) {
+ MOZ_ASSERT(script != nullptr);
+ BaseEntry::init(Baseline, code, nativeStartAddr, nativeEndAddr);
+ script_ = script;
+ str_ = str;
+ }
+
+ JSScript* script() const { return script_; }
+
+ const char* str() const { return str_; }
+
+ void trackIonAbort(jsbytecode* pc, const char* message);
+
+ bool hadIonAbort() const {
+ MOZ_ASSERT(!ionAbortPc_ || ionAbortMessage_);
+ return ionAbortPc_ != nullptr;
+ }
+
+ void destroy();
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID() const;
+
+ bool trace(JSTracer* trc);
+ void sweepChildren();
+ bool isMarkedFromAnyThread(JSRuntime* rt);
+ };
+
+ struct BaselineInterpreterEntry : public BaseEntry {
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr) {
+ BaseEntry::init(BaselineInterpreter, code, nativeStartAddr,
+ nativeEndAddr);
+ }
+
+ void destroy() {}
+
+ void* canonicalNativeAddrFor(void* ptr) const;
+
+ [[nodiscard]] bool callStackAtAddr(void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const;
+
+ uint32_t callStackAtAddr(void* ptr, const char** results,
+ uint32_t maxResults) const;
+
+ uint64_t lookupRealmID() const;
+ };
+
+ // Dummy entries are created for jitcode generated when profiling is not
+ // turned on, so that they have representation in the global table if they are
+ // on the stack when profiling is enabled.
+ struct DummyEntry : public BaseEntry {
+ void init(JitCode* code, void* nativeStartAddr, void* nativeEndAddr) {
+ BaseEntry::init(Dummy, code, nativeStartAddr, nativeEndAddr);
+ }
+
+ void destroy() {}
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const {
+ return nullptr;
+ }
+
+ [[nodiscard]] bool callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ return true;
+ }
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const {
+ return 0;
+ }
+
+ uint64_t lookupRealmID() const { return 0; }
+ };
+
+  // QueryEntry is never stored in the table, just used for queries
+  // where an instance of JitcodeGlobalEntry is required to do lookups
+  // in the global table.
+ struct QueryEntry : public BaseEntry {
+ void init(void* addr) { BaseEntry::init(Query, nullptr, addr, addr); }
+ uint8_t* addr() const {
+ return reinterpret_cast<uint8_t*>(nativeStartAddr());
+ }
+ void destroy() {}
+ };
+
+ private:
+ JitcodeSkiplistTower* tower_;
+
+ union {
+ // Shadowing BaseEntry instance to allow access to base fields
+ // and type extraction.
+ BaseEntry base_;
+
+ // The most common entry type: describing jitcode generated by
+ // Ion main-line code.
+ IonEntry ion_;
+
+ // Baseline jitcode.
+ BaselineEntry baseline_;
+
+ // BaselineInterpreter code.
+ BaselineInterpreterEntry baselineInterpreter_;
+
+ // Dummy entries.
+ DummyEntry dummy_;
+
+    // When doing queries on the skip-list for particular addresses,
+    // the query addresses are represented using a QueryEntry.
+ QueryEntry query_;
+ };
+
+ public:
+ JitcodeGlobalEntry() : tower_(nullptr) { base_.init(); }
+
+ explicit JitcodeGlobalEntry(const IonEntry& ion) : JitcodeGlobalEntry() {
+ ion_ = ion;
+ }
+
+ explicit JitcodeGlobalEntry(const BaselineEntry& baseline)
+ : JitcodeGlobalEntry() {
+ baseline_ = baseline;
+ }
+
+ explicit JitcodeGlobalEntry(const BaselineInterpreterEntry& baselineInterp)
+ : JitcodeGlobalEntry() {
+ baselineInterpreter_ = baselineInterp;
+ }
+
+ explicit JitcodeGlobalEntry(const DummyEntry& dummy) : JitcodeGlobalEntry() {
+ dummy_ = dummy;
+ }
+
+ explicit JitcodeGlobalEntry(const QueryEntry& query) : JitcodeGlobalEntry() {
+ query_ = query;
+ }
+
+ static JitcodeGlobalEntry MakeQuery(void* ptr) {
+ QueryEntry query;
+ query.init(ptr);
+ return JitcodeGlobalEntry(query);
+ }
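+  // For example, resolving a native address conceptually probes the table
+  // with a throwaway query entry (illustrative; |addr| is an assumed input):
+  //
+  //   JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(addr);
+  //   // compare(query, realEntry) then orders the probe against real
+  //   // entries by their native address ranges.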
+
+ void destroy() {
+ switch (kind()) {
+ case Ion:
+ ionEntry().destroy();
+ break;
+ case Baseline:
+ baselineEntry().destroy();
+ break;
+ case BaselineInterpreter:
+ baselineInterpreterEntry().destroy();
+ break;
+ case Dummy:
+ dummyEntry().destroy();
+ break;
+ case Query:
+ queryEntry().destroy();
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ JitCode* jitcode() const { return baseEntry().jitcode(); }
+ void* nativeStartAddr() const { return base_.nativeStartAddr(); }
+ void* nativeEndAddr() const { return base_.nativeEndAddr(); }
+
+ void setSamplePositionInBuffer(uint64_t samplePositionInBuffer) {
+ baseEntry().setSamplePositionInBuffer(samplePositionInBuffer);
+ }
+ void setAsExpired() { baseEntry().setAsExpired(); }
+ bool isSampled(uint64_t bufferRangeStart) {
+ return baseEntry().isSampled(bufferRangeStart);
+ }
+
+ bool startsBelowPointer(void* ptr) const {
+ return base_.startsBelowPointer(ptr);
+ }
+ bool endsAbovePointer(void* ptr) const { return base_.endsAbovePointer(ptr); }
+ bool containsPointer(void* ptr) const { return base_.containsPointer(ptr); }
+
+ bool overlapsWith(const JitcodeGlobalEntry& entry) const {
+ // Catch full containment of |entry| within |this|, and partial overlaps.
+ if (containsPointer(entry.nativeStartAddr()) ||
+ containsPointer(entry.nativeEndAddr())) {
+ return true;
+ }
+
+ // Catch full containment of |this| within |entry|.
+ if (startsBelowPointer(entry.nativeEndAddr()) &&
+ endsAbovePointer(entry.nativeStartAddr())) {
+ return true;
+ }
+
+ return false;
+ }
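+  // Example: an entry covering [0x1000, 0x2000) overlaps entries covering
+  // [0x1800, 0x2800) (partial overlap) and [0x0800, 0x3000) (it is fully
+  // contained by the latter), but not one covering [0x2000, 0x3000), since
+  // end addresses are exclusive.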
+
+ Kind kind() const { return base_.kind(); }
+
+ bool isValid() const { return (kind() > INVALID) && (kind() < LIMIT); }
+ bool isIon() const { return kind() == Ion; }
+ bool isBaseline() const { return kind() == Baseline; }
+ bool isBaselineInterpreter() const { return kind() == BaselineInterpreter; }
+ bool isDummy() const { return kind() == Dummy; }
+ bool isQuery() const { return kind() == Query; }
+
+ BaseEntry& baseEntry() {
+ MOZ_ASSERT(isValid());
+ return base_;
+ }
+ IonEntry& ionEntry() {
+ MOZ_ASSERT(isIon());
+ return ion_;
+ }
+ BaselineEntry& baselineEntry() {
+ MOZ_ASSERT(isBaseline());
+ return baseline_;
+ }
+ BaselineInterpreterEntry& baselineInterpreterEntry() {
+ MOZ_ASSERT(isBaselineInterpreter());
+ return baselineInterpreter_;
+ }
+ DummyEntry& dummyEntry() {
+ MOZ_ASSERT(isDummy());
+ return dummy_;
+ }
+ QueryEntry& queryEntry() {
+ MOZ_ASSERT(isQuery());
+ return query_;
+ }
+
+ const BaseEntry& baseEntry() const {
+ MOZ_ASSERT(isValid());
+ return base_;
+ }
+ const IonEntry& ionEntry() const {
+ MOZ_ASSERT(isIon());
+ return ion_;
+ }
+ const BaselineEntry& baselineEntry() const {
+ MOZ_ASSERT(isBaseline());
+ return baseline_;
+ }
+ const BaselineInterpreterEntry& baselineInterpreterEntry() const {
+ MOZ_ASSERT(isBaselineInterpreter());
+ return baselineInterpreter_;
+ }
+ const DummyEntry& dummyEntry() const {
+ MOZ_ASSERT(isDummy());
+ return dummy_;
+ }
+ const QueryEntry& queryEntry() const {
+ MOZ_ASSERT(isQuery());
+ return query_;
+ }
+
+ void* canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().canonicalNativeAddrFor(ptr);
+ case Baseline:
+ return baselineEntry().canonicalNativeAddrFor(ptr);
+ case Dummy:
+ return dummyEntry().canonicalNativeAddrFor(rt, ptr);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return nullptr;
+ }
+
+ // Read the inline call stack at a given point in the native code and append
+ // into the given vector. Innermost (script,pc) pair will be appended first,
+ // and outermost appended last.
+ //
+ // Returns false on memory failure.
+ [[nodiscard]] bool callStackAtAddr(JSRuntime* rt, void* ptr,
+ BytecodeLocationVector& results,
+ uint32_t* depth) const {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().callStackAtAddr(ptr, results, depth);
+ case Baseline:
+ return baselineEntry().callStackAtAddr(ptr, results, depth);
+ case BaselineInterpreter:
+ return baselineInterpreterEntry().callStackAtAddr(ptr, results, depth);
+ case Dummy:
+ return dummyEntry().callStackAtAddr(rt, ptr, results, depth);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return false;
+ }
+
+ uint32_t callStackAtAddr(JSRuntime* rt, void* ptr, const char** results,
+ uint32_t maxResults) const {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().callStackAtAddr(ptr, results, maxResults);
+ case Baseline:
+ return baselineEntry().callStackAtAddr(ptr, results, maxResults);
+ case BaselineInterpreter:
+ return baselineInterpreterEntry().callStackAtAddr(ptr, results,
+ maxResults);
+ case Dummy:
+ return dummyEntry().callStackAtAddr(rt, ptr, results, maxResults);
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+    return 0;
+ }
+
+ uint64_t lookupRealmID(JSRuntime* rt, void* ptr) const {
+ switch (kind()) {
+ case Ion:
+ return ionEntry().lookupRealmID(ptr);
+ case Baseline:
+ return baselineEntry().lookupRealmID();
+ case Dummy:
+ return dummyEntry().lookupRealmID();
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+  // Figure out the number of (JSScript*, jsbytecode*) pairs that are active
+  // at this location.
+ uint32_t lookupInlineCallDepth(void* ptr);
+
+ // Compare two global entries.
+ static int compare(const JitcodeGlobalEntry& ent1,
+ const JitcodeGlobalEntry& ent2);
+ int compareTo(const JitcodeGlobalEntry& other) {
+ return compare(*this, other);
+ }
+
+ Zone* zone() { return baseEntry().jitcode()->zone(); }
+
+ bool trace(JSTracer* trc) {
+ bool tracedAny = baseEntry().traceJitcode(trc);
+ switch (kind()) {
+ case Ion:
+ tracedAny |= ionEntry().trace(trc);
+ break;
+ case Baseline:
+ tracedAny |= baselineEntry().trace(trc);
+ break;
+ case BaselineInterpreter:
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return tracedAny;
+ }
+
+ void sweepChildren(JSRuntime* rt) {
+ switch (kind()) {
+ case Ion:
+ ionEntry().sweepChildren();
+ break;
+ case Baseline:
+ baselineEntry().sweepChildren();
+ break;
+ case BaselineInterpreter:
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ }
+
+ bool isMarkedFromAnyThread(JSRuntime* rt) {
+ if (!baseEntry().isJitcodeMarkedFromAnyThread(rt)) {
+ return false;
+ }
+ switch (kind()) {
+ case Ion:
+ return ionEntry().isMarkedFromAnyThread(rt);
+ case Baseline:
+ return baselineEntry().isMarkedFromAnyThread(rt);
+ case Dummy:
+ break;
+ default:
+ MOZ_CRASH("Invalid JitcodeGlobalEntry kind.");
+ }
+ return true;
+ }
+
+ //
+ // When stored in a free-list, entries use 'tower_' to store a
+ // pointer to the next entry. In this context only, 'tower_'
+ // may refer to a |JitcodeGlobalEntry*| instead of a
+ // |JitcodeSkiplistTower*|.
+ //
+
+ void addToFreeList(JitcodeGlobalEntry** freeList) {
+ MOZ_ASSERT(!isValid());
+
+ JitcodeGlobalEntry* nextFreeEntry = *freeList;
+ MOZ_ASSERT_IF(nextFreeEntry, !nextFreeEntry->isValid());
+
+ tower_ = (JitcodeSkiplistTower*)nextFreeEntry;
+ *freeList = this;
+ }
+
+ static JitcodeGlobalEntry* PopFromFreeList(JitcodeGlobalEntry** freeList) {
+ if (!*freeList) {
+ return nullptr;
+ }
+
+ JitcodeGlobalEntry* entry = *freeList;
+ MOZ_ASSERT(!entry->isValid());
+ JitcodeGlobalEntry* nextFreeEntry = (JitcodeGlobalEntry*)entry->tower_;
+ entry->tower_ = nullptr;
+ *freeList = nextFreeEntry;
+ return entry;
+ }
+};
+
+/*
+ * Global table of JitcodeGlobalEntry values sorted by native address range.
+ */
+class JitcodeGlobalTable {
+ private:
+ static const size_t LIFO_CHUNK_SIZE = 16 * 1024;
+
+ LifoAlloc alloc_;
+ JitcodeGlobalEntry* freeEntries_;
+ uint32_t rand_;
+ uint32_t skiplistSize_;
+
+ JitcodeGlobalEntry* startTower_[JitcodeSkiplistTower::MAX_HEIGHT];
+ JitcodeSkiplistTower* freeTowers_[JitcodeSkiplistTower::MAX_HEIGHT];
+
+ public:
+ JitcodeGlobalTable()
+ : alloc_(LIFO_CHUNK_SIZE),
+ freeEntries_(nullptr),
+ rand_(0),
+ skiplistSize_(0) {
+ for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
+ startTower_[i] = nullptr;
+ }
+ for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
+ freeTowers_[i] = nullptr;
+ }
+ }
+ ~JitcodeGlobalTable() = default;
+
+ bool empty() const { return skiplistSize_ == 0; }
+
+ JitcodeGlobalEntry* lookup(void* ptr) { return lookupInternal(ptr); }
+
+ const JitcodeGlobalEntry* lookupForSampler(void* ptr, JSRuntime* rt,
+ uint64_t samplePosInBuffer);
+
+ [[nodiscard]] bool addEntry(const JitcodeGlobalEntry::IonEntry& entry) {
+ return addEntry(JitcodeGlobalEntry(entry));
+ }
+ [[nodiscard]] bool addEntry(const JitcodeGlobalEntry::BaselineEntry& entry) {
+ return addEntry(JitcodeGlobalEntry(entry));
+ }
+ [[nodiscard]] bool addEntry(
+ const JitcodeGlobalEntry::BaselineInterpreterEntry& entry) {
+ return addEntry(JitcodeGlobalEntry(entry));
+ }
+ [[nodiscard]] bool addEntry(const JitcodeGlobalEntry::DummyEntry& entry) {
+ return addEntry(JitcodeGlobalEntry(entry));
+ }
+
+ void removeEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower);
+ void releaseEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower,
+ JSRuntime* rt);
+
+ void setAllEntriesAsExpired();
+ [[nodiscard]] bool markIteratively(GCMarker* marker);
+ void traceWeak(JSRuntime* rt, JSTracer* trc);
+
+ private:
+ [[nodiscard]] bool addEntry(const JitcodeGlobalEntry& entry);
+
+ JitcodeGlobalEntry* lookupInternal(void* ptr);
+
+  // Initialize towerOut such that towerOut[i] (for i in [0, MAX_HEIGHT-1])
+  // is a JitcodeGlobalEntry that is sorted to be < query, and whose
+  // successor at level i is either null or sorted to be >= query.
+  //
+  // If no entry with the given properties exists for level i, then
+  // towerOut[i] is initialized to nullptr.
+ void searchInternal(const JitcodeGlobalEntry& query,
+ JitcodeGlobalEntry** towerOut);
+
+ JitcodeGlobalEntry* searchAtHeight(unsigned level, JitcodeGlobalEntry* start,
+ const JitcodeGlobalEntry& query);
+
+ // Calculate next random tower height.
+ unsigned generateTowerHeight();
+
+ JitcodeSkiplistTower* allocateTower(unsigned height);
+ JitcodeGlobalEntry* allocateEntry();
+
+#ifdef DEBUG
+ void verifySkiplist();
+#else
+ void verifySkiplist() {}
+#endif
+
+ public:
+ class Range {
+ protected:
+ JitcodeGlobalTable& table_;
+ JitcodeGlobalEntry* cur_;
+
+ public:
+ explicit Range(JitcodeGlobalTable& table)
+ : table_(table), cur_(table.startTower_[0]) {}
+
+ JitcodeGlobalEntry* front() const {
+ MOZ_ASSERT(!empty());
+ return cur_;
+ }
+
+ bool empty() const { return !cur_; }
+
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ cur_ = cur_->tower_->next(0);
+ }
+ };
+
+ // An enumerator class that can remove entries as it enumerates. If this
+ // functionality is not needed, use Range instead.
+ class Enum : public Range {
+ JSRuntime* rt_;
+ JitcodeGlobalEntry* next_;
+ JitcodeGlobalEntry* prevTower_[JitcodeSkiplistTower::MAX_HEIGHT];
+
+ public:
+ Enum(JitcodeGlobalTable& table, JSRuntime* rt);
+
+ void popFront();
+ void removeFront();
+ };
+};
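+
+// Iteration over the table uses the Range/Enum helpers above; Enum is the
+// variant to use when entries may be removed mid-walk. A hypothetical
+// read-only traversal, given a JitcodeGlobalTable& table:
+//
+//   for (JitcodeGlobalTable::Range r(table); !r.empty(); r.popFront()) {
+//     JitcodeGlobalEntry* entry = r.front();
+//     // ... inspect entry->kind(), entry->nativeStartAddr(), etc.
+//   }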
+
+// clang-format off
+/*
+ * Container class for main jitcode table.
+ * The Region table's memory is structured as follows:
+ *
+ * +------------------------------------------------+   |
+ * |  Region 1 Run                                  |   |
+ * |------------------------------------------------|   |
+ * |  Region 2 Run                                  |   |
+ * |                                                |   |
+ * |                                                |   |
+ * |------------------------------------------------|   |
+ * |  Region 3 Run                                  |   |
+ * |                                                |   |
+ * |------------------------------------------------|   |-- Payload
+ * |                                                |   |
+ * |                     ...                        |   |
+ * |                                                |   |
+ * |------------------------------------------------|   |
+ * |  Region M Run                                  |   |
+ * |                                                |   |
+ * +================================================+ <- RegionTable pointer points here
+ * |  uint32_t numRegions = M                       |   |
+ * +------------------------------------------------+   |
+ * |  Region 1                                      |   |
+ * |    uint32_t entryOffset = size(Payload)        |   |
+ * +------------------------------------------------+   |
+ * |                                                |   |-- Table
+ * |                     ...                        |   |
+ * |                                                |   |
+ * +------------------------------------------------+   |
+ * |  Region M                                      |   |
+ * |    uint32_t entryOffset                        |   |
+ * +------------------------------------------------+   |
+ *
+ * The region table is composed of two sections: a tail section that contains a table of
+ * fixed-size entries containing offsets into the head section, and a head section that
+ * holds a sequence of variable-sized runs. The table in the tail section serves to
+ * locate the variable-length encoded structures in the head section.
+ *
+ * The entryOffsets in the table indicate the byte offset to subtract from the regionTable
+ * pointer to arrive at the encoded region in the payload.
+ *
+ *
+ * Variable-length entries in payload
+ * ----------------------------------
+ * The entryOffsets in the region table's fixed-sized entries refer to a location within the
+ * variable-length payload section. This location contains a compactly encoded "run" of
+ * mappings.
+ *
+ * Each run starts by describing the offset within the native code it starts at, and the
+ * sequence of (JSScript*, jsbytecode*) pairs active at that site. Following that, there
+ * are a number of variable-length entries encoding (nativeOffsetDelta, bytecodeOffsetDelta)
+ * pairs for the run.
+ *
+ *   VarUint32 nativeOffset;
+ *      - The offset from nativeStartAddr in the global table entry at which
+ *        the jitcode for this region starts.
+ *
+ *   Uint8_t scriptDepth;
+ *      - The depth of inlined scripts for this region.
+ *
+ *   List<VarUint32> inlineScriptPcStack;
+ *      - We encode (2 * scriptDepth) VarUint32s here. Each pair of uint32s is
+ *        taken as an index into the scriptList in the global table entry and
+ *        a pcOffset, respectively.
+ *
+ *   List<NativeAndBytecodeDelta> deltaRun;
+ *      - The rest of the entry is a deltaRun that stores a series of
+ *        variable-length encoded NativeAndBytecodeDelta datums.
+ */
+// clang-format on
+class JitcodeRegionEntry {
+ private:
+ static const unsigned MAX_RUN_LENGTH = 100;
+
+ public:
+ static void WriteHead(CompactBufferWriter& writer, uint32_t nativeOffset,
+ uint8_t scriptDepth);
+ static void ReadHead(CompactBufferReader& reader, uint32_t* nativeOffset,
+ uint8_t* scriptDepth);
+
+ static void WriteScriptPc(CompactBufferWriter& writer, uint32_t scriptIdx,
+ uint32_t pcOffset);
+ static void ReadScriptPc(CompactBufferReader& reader, uint32_t* scriptIdx,
+ uint32_t* pcOffset);
+
+ static void WriteDelta(CompactBufferWriter& writer, uint32_t nativeDelta,
+ int32_t pcDelta);
+ static void ReadDelta(CompactBufferReader& reader, uint32_t* nativeDelta,
+ int32_t* pcDelta);
+
+ // Given a pointer into an array of NativeToBytecode (and a pointer to the end
+  // of the array), compute the number of entries that would be consumed by
+ // outputting a run starting at this one.
+ static uint32_t ExpectedRunLength(const NativeToBytecode* entry,
+ const NativeToBytecode* end);
+
+ // Write a run, starting at the given NativeToBytecode entry, into the given
+ // buffer writer.
+ [[nodiscard]] static bool WriteRun(CompactBufferWriter& writer,
+ JSScript** scriptList,
+ uint32_t scriptListSize,
+ uint32_t runLength,
+ const NativeToBytecode* entry);
+
+ // Delta Run entry formats are encoded little-endian:
+ //
+ // byte 0
+ // NNNN-BBB0
+ // Single byte format. nativeDelta in [0, 15], pcDelta in [0, 7]
+ //
+ static const uint32_t ENC1_MASK = 0x1;
+ static const uint32_t ENC1_MASK_VAL = 0x0;
+
+ static const uint32_t ENC1_NATIVE_DELTA_MAX = 0xf;
+ static const unsigned ENC1_NATIVE_DELTA_SHIFT = 4;
+
+ static const uint32_t ENC1_PC_DELTA_MASK = 0x0e;
+ static const int32_t ENC1_PC_DELTA_MAX = 0x7;
+ static const unsigned ENC1_PC_DELTA_SHIFT = 1;
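+
+  // Worked example for the single-byte form: nativeDelta = 5, pcDelta = 3
+  // encodes as (5 << ENC1_NATIVE_DELTA_SHIFT) | (3 << ENC1_PC_DELTA_SHIFT) |
+  // ENC1_MASK_VAL = 0x50 | 0x06 | 0x0 = 0x56. Decoding checks
+  // (0x56 & ENC1_MASK) == ENC1_MASK_VAL, then recovers
+  // pcDelta = (0x56 & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT = 3 and
+  // nativeDelta = 0x56 >> ENC1_NATIVE_DELTA_SHIFT = 5.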
+
+ // byte 1 byte 0
+ // NNNN-NNNN BBBB-BB01
+ // Two-byte format. nativeDelta in [0, 255], pcDelta in [0, 63]
+ //
+ static const uint32_t ENC2_MASK = 0x3;
+ static const uint32_t ENC2_MASK_VAL = 0x1;
+
+ static const uint32_t ENC2_NATIVE_DELTA_MAX = 0xff;
+ static const unsigned ENC2_NATIVE_DELTA_SHIFT = 8;
+
+ static const uint32_t ENC2_PC_DELTA_MASK = 0x00fc;
+ static const int32_t ENC2_PC_DELTA_MAX = 0x3f;
+ static const unsigned ENC2_PC_DELTA_SHIFT = 2;
+
+ // byte 2 byte 1 byte 0
+ // NNNN-NNNN NNNB-BBBB BBBB-B011
+ // Three-byte format. nativeDelta in [0, 2047], pcDelta in [-512, 511]
+ //
+ static const uint32_t ENC3_MASK = 0x7;
+ static const uint32_t ENC3_MASK_VAL = 0x3;
+
+ static const uint32_t ENC3_NATIVE_DELTA_MAX = 0x7ff;
+ static const unsigned ENC3_NATIVE_DELTA_SHIFT = 13;
+
+ static const uint32_t ENC3_PC_DELTA_MASK = 0x001ff8;
+ static const int32_t ENC3_PC_DELTA_MAX = 0x1ff;
+ static const int32_t ENC3_PC_DELTA_MIN = -ENC3_PC_DELTA_MAX - 1;
+ static const unsigned ENC3_PC_DELTA_SHIFT = 3;
+
+ // byte 3 byte 2 byte 1 byte 0
+ // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
+  //      Four-byte format. nativeDelta in [0, 65535],
+ // pcDelta in [-4096, 4095]
+ static const uint32_t ENC4_MASK = 0x7;
+ static const uint32_t ENC4_MASK_VAL = 0x7;
+
+ static const uint32_t ENC4_NATIVE_DELTA_MAX = 0xffff;
+ static const unsigned ENC4_NATIVE_DELTA_SHIFT = 16;
+
+ static const uint32_t ENC4_PC_DELTA_MASK = 0x0000fff8;
+ static const int32_t ENC4_PC_DELTA_MAX = 0xfff;
+ static const int32_t ENC4_PC_DELTA_MIN = -ENC4_PC_DELTA_MAX - 1;
+ static const unsigned ENC4_PC_DELTA_SHIFT = 3;
+
+ static bool IsDeltaEncodeable(uint32_t nativeDelta, int32_t pcDelta) {
+ return (nativeDelta <= ENC4_NATIVE_DELTA_MAX) &&
+ (pcDelta >= ENC4_PC_DELTA_MIN) && (pcDelta <= ENC4_PC_DELTA_MAX);
+ }
+
+ private:
+ const uint8_t* data_;
+ const uint8_t* end_;
+
+ // Unpacked state from jitcode entry.
+ uint32_t nativeOffset_;
+ uint8_t scriptDepth_;
+ const uint8_t* scriptPcStack_;
+ const uint8_t* deltaRun_;
+
+ void unpack();
+
+ public:
+ JitcodeRegionEntry(const uint8_t* data, const uint8_t* end)
+ : data_(data),
+ end_(end),
+ nativeOffset_(0),
+ scriptDepth_(0),
+ scriptPcStack_(nullptr),
+ deltaRun_(nullptr) {
+ MOZ_ASSERT(data_ < end_);
+ unpack();
+ MOZ_ASSERT(scriptPcStack_ < end_);
+ MOZ_ASSERT(deltaRun_ <= end_);
+ }
+
+ uint32_t nativeOffset() const { return nativeOffset_; }
+ uint32_t scriptDepth() const { return scriptDepth_; }
+
+ class ScriptPcIterator {
+ private:
+ const uint8_t* start_;
+ const uint8_t* end_;
+#ifdef DEBUG
+ uint32_t count_;
+#endif
+ uint32_t idx_;
+ const uint8_t* cur_;
+
+ public:
+ ScriptPcIterator(const uint8_t* start, const uint8_t* end, uint32_t count)
+ : start_(start),
+ end_(end),
+#ifdef DEBUG
+ count_(count),
+#endif
+ idx_(0),
+ cur_(start_) {
+ }
+
+ bool hasMore() const {
+ MOZ_ASSERT((idx_ == count_) == (cur_ == end_));
+ MOZ_ASSERT((idx_ < count_) == (cur_ < end_));
+ return cur_ < end_;
+ }
+
+ void readNext(uint32_t* scriptIdxOut, uint32_t* pcOffsetOut) {
+ MOZ_ASSERT(scriptIdxOut);
+ MOZ_ASSERT(pcOffsetOut);
+ MOZ_ASSERT(hasMore());
+
+ CompactBufferReader reader(cur_, end_);
+ ReadScriptPc(reader, scriptIdxOut, pcOffsetOut);
+
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+
+ idx_++;
+ MOZ_ASSERT_IF(idx_ == count_, cur_ == end_);
+ }
+
+ void reset() {
+ idx_ = 0;
+ cur_ = start_;
+ }
+ };
+
+ ScriptPcIterator scriptPcIterator() const {
+ // End of script+pc sequence is the start of the delta run.
+ return ScriptPcIterator(scriptPcStack_, deltaRun_, scriptDepth_);
+ }
+
+ class DeltaIterator {
+ private:
+ const uint8_t* start_;
+ const uint8_t* end_;
+ const uint8_t* cur_;
+
+ public:
+ DeltaIterator(const uint8_t* start, const uint8_t* end)
+ : start_(start), end_(end), cur_(start) {}
+
+ bool hasMore() const {
+ MOZ_ASSERT(cur_ <= end_);
+ return cur_ < end_;
+ }
+
+ void readNext(uint32_t* nativeDeltaOut, int32_t* pcDeltaOut) {
+ MOZ_ASSERT(nativeDeltaOut != nullptr);
+ MOZ_ASSERT(pcDeltaOut != nullptr);
+
+ MOZ_ASSERT(hasMore());
+
+ CompactBufferReader reader(cur_, end_);
+ ReadDelta(reader, nativeDeltaOut, pcDeltaOut);
+
+ cur_ = reader.currentPosition();
+ MOZ_ASSERT(cur_ <= end_);
+ }
+
+ void reset() { cur_ = start_; }
+ };
+ DeltaIterator deltaIterator() const { return DeltaIterator(deltaRun_, end_); }
+
+ uint32_t findPcOffset(uint32_t queryNativeOffset,
+ uint32_t startPcOffset) const;
+};
+
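+// Decoding a region walks its two iterators in order: the script/pc prefix
+// via scriptPcIterator(), then the delta run via deltaIterator(). A
+// hypothetical sketch that accumulates the native offsets covered by region
+// |i| of an assumed JitcodeIonTable* ionTable (declared below):
+//
+//   JitcodeRegionEntry region = ionTable->regionEntry(i);
+//   uint32_t nativeOffset = region.nativeOffset();
+//   JitcodeRegionEntry::DeltaIterator it = region.deltaIterator();
+//   while (it.hasMore()) {
+//     uint32_t nativeDelta;
+//     int32_t pcDelta;
+//     it.readNext(&nativeDelta, &pcDelta);
+//     nativeOffset += nativeDelta;
+//   }
+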
+class JitcodeIonTable {
+ private:
+ /* Variable length payload section "below" here. */
+ uint32_t numRegions_;
+ uint32_t regionOffsets_[1];
+
+ const uint8_t* payloadEnd() const {
+ return reinterpret_cast<const uint8_t*>(this);
+ }
+
+ public:
+ explicit JitcodeIonTable(uint32_t numRegions) : numRegions_(numRegions) {
+ for (uint32_t i = 0; i < numRegions; i++) {
+ regionOffsets_[i] = 0;
+ }
+ }
+
+ [[nodiscard]] bool makeIonEntry(JSContext* cx, JitCode* code,
+ uint32_t numScripts, JSScript** scripts,
+ JitcodeGlobalEntry::IonEntry& out);
+
+ uint32_t numRegions() const { return numRegions_; }
+
+ uint32_t regionOffset(uint32_t regionIndex) const {
+ MOZ_ASSERT(regionIndex < numRegions());
+ return regionOffsets_[regionIndex];
+ }
+
+ JitcodeRegionEntry regionEntry(uint32_t regionIndex) const {
+ const uint8_t* regionStart = payloadEnd() - regionOffset(regionIndex);
+ const uint8_t* regionEnd = payloadEnd();
+ if (regionIndex < numRegions_ - 1) {
+ regionEnd -= regionOffset(regionIndex + 1);
+ }
+ return JitcodeRegionEntry(regionStart, regionEnd);
+ }
+
+ bool regionContainsOffset(uint32_t regionIndex, uint32_t nativeOffset) {
+ MOZ_ASSERT(regionIndex < numRegions());
+
+ JitcodeRegionEntry ent = regionEntry(regionIndex);
+ if (nativeOffset < ent.nativeOffset()) {
+ return false;
+ }
+
+ if (regionIndex == numRegions_ - 1) {
+ return true;
+ }
+
+ return nativeOffset < regionEntry(regionIndex + 1).nativeOffset();
+ }
+
+ uint32_t findRegionEntry(uint32_t offset) const;
+
+ const uint8_t* payloadStart() const {
+    // The beginning of the payload and the beginning of the first region are
+    // the same.
+ return payloadEnd() - regionOffset(0);
+ }
+
+ [[nodiscard]] static bool WriteIonTable(CompactBufferWriter& writer,
+ JSScript** scriptList,
+ uint32_t scriptListSize,
+ const NativeToBytecode* start,
+ const NativeToBytecode* end,
+ uint32_t* tableOffsetOut,
+ uint32_t* numRegionsOut);
+};
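+
+// The offset arithmetic mirrors the layout diagram above. For example, with
+// two regions where regionOffset(0) == 64 and regionOffset(1) == 40, region 0
+// occupies [payloadEnd() - 64, payloadEnd() - 40) and region 1, being the
+// last region, runs from payloadEnd() - 40 up to payloadEnd(), exactly as
+// regionEntry() computes.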
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_JitcodeMap_h */