Diffstat (limited to 'tools/profiler/core')
31 files changed, 17889 insertions, 0 deletions
diff --git a/tools/profiler/core/EHABIStackWalk.cpp b/tools/profiler/core/EHABIStackWalk.cpp new file mode 100644 index 0000000000..ee3e824e23 --- /dev/null +++ b/tools/profiler/core/EHABIStackWalk.cpp @@ -0,0 +1,589 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * This is an implementation of stack unwinding according to a subset + * of the ARM Exception Handling ABI, as described in: + * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf + * + * This handles only the ARM-defined "personality routines" (chapter + * 9), and don't track the value of FP registers, because profiling + * needs only chain of PC/SP values. + * + * Because the exception handling info may not be accurate for all + * possible places where an async signal could occur (e.g., in a + * prologue or epilogue), this bounds-checks all stack accesses. + * + * This file uses "struct" for structures in the exception tables and + * "class" otherwise. We should avoid violating the C++11 + * standard-layout rules in the former. + */ + +#include "EHABIStackWalk.h" + +#include "shared-libraries.h" +#include "platform.h" + +#include "mozilla/Atomics.h" +#include "mozilla/Attributes.h" +#include "mozilla/DebugOnly.h" +#include "mozilla/EndianUtils.h" + +#include <algorithm> +#include <elf.h> +#include <stdint.h> +#include <vector> +#include <string> + +#ifndef PT_ARM_EXIDX +# define PT_ARM_EXIDX 0x70000001 +#endif + +namespace mozilla { + +struct PRel31 { + uint32_t mBits; + bool topBit() const { return mBits & 0x80000000; } + uint32_t value() const { return mBits & 0x7fffffff; } + int32_t offset() const { return (static_cast<int32_t>(mBits) << 1) >> 1; } + const void* compute() const { + return reinterpret_cast<const char*>(this) + offset(); + } + + private: + PRel31(const PRel31& copied) = delete; + PRel31() = delete; +}; + +struct EHEntry { + PRel31 startPC; + PRel31 exidx; + + private: + EHEntry(const EHEntry& copied) = delete; + EHEntry() = delete; +}; + +class EHState { + // Note that any core register can be used as a "frame pointer" to + // influence the unwinding process, so this must track all of them. 
+ uint32_t mRegs[16]; + + public: + bool unwind(const EHEntry* aEntry, const void* stackBase); + uint32_t& operator[](int i) { return mRegs[i]; } + const uint32_t& operator[](int i) const { return mRegs[i]; } + explicit EHState(const mcontext_t&); +}; + +enum { R_SP = 13, R_LR = 14, R_PC = 15 }; + +class EHTable { + uint32_t mStartPC; + uint32_t mEndPC; + uint32_t mBaseAddress; + const EHEntry* mEntriesBegin; + const EHEntry* mEntriesEnd; + std::string mName; + + public: + EHTable(const void* aELF, size_t aSize, const std::string& aName); + const EHEntry* lookup(uint32_t aPC) const; + bool isValid() const { return mEntriesEnd != mEntriesBegin; } + const std::string& name() const { return mName; } + uint32_t startPC() const { return mStartPC; } + uint32_t endPC() const { return mEndPC; } + uint32_t baseAddress() const { return mBaseAddress; } +}; + +class EHAddrSpace { + std::vector<uint32_t> mStarts; + std::vector<EHTable> mTables; + static mozilla::Atomic<const EHAddrSpace*> sCurrent; + + public: + explicit EHAddrSpace(const std::vector<EHTable>& aTables); + const EHTable* lookup(uint32_t aPC) const; + static void Update(); + static const EHAddrSpace* Get(); +}; + +void EHABIStackWalkInit() { EHAddrSpace::Update(); } + +size_t EHABIStackWalk(const mcontext_t& aContext, void* stackBase, void** aSPs, + void** aPCs, const size_t aNumFrames) { + const EHAddrSpace* space = EHAddrSpace::Get(); + EHState state(aContext); + size_t count = 0; + + while (count < aNumFrames) { + uint32_t pc = state[R_PC], sp = state[R_SP]; + aPCs[count] = reinterpret_cast<void*>(pc); + aSPs[count] = reinterpret_cast<void*>(sp); + count++; + + if (!space) break; + // TODO: cache these lookups. Binary-searching libxul is + // expensive (possibly more expensive than doing the actual + // unwind), and even a small cache should help. + const EHTable* table = space->lookup(pc); + if (!table) break; + const EHEntry* entry = table->lookup(pc); + if (!entry) break; + if (!state.unwind(entry, stackBase)) break; + } + + return count; +} + +class EHInterp { + public: + // Note that stackLimit is exclusive and stackBase is inclusive + // (i.e, stackLimit < SP <= stackBase), following the convention + // set by the AAPCS spec. + EHInterp(EHState& aState, const EHEntry* aEntry, uint32_t aStackLimit, + uint32_t aStackBase) + : mState(aState), + mStackLimit(aStackLimit), + mStackBase(aStackBase), + mNextWord(0), + mWordsLeft(0), + mFailed(false) { + const PRel31& exidx = aEntry->exidx; + uint32_t firstWord; + + if (exidx.mBits == 1) { // EXIDX_CANTUNWIND + mFailed = true; + return; + } + if (exidx.topBit()) { + firstWord = exidx.mBits; + } else { + mNextWord = reinterpret_cast<const uint32_t*>(exidx.compute()); + firstWord = *mNextWord++; + } + + switch (firstWord >> 24) { + case 0x80: // short + mWord = firstWord << 8; + mBytesLeft = 3; + break; + case 0x81: + case 0x82: // long; catch descriptor size ignored + mWord = firstWord << 16; + mBytesLeft = 2; + mWordsLeft = (firstWord >> 16) & 0xff; + break; + default: + // unknown personality + mFailed = true; + } + } + + bool unwind(); + + private: + // TODO: GCC has been observed not CSEing repeated reads of + // mState[R_SP] with writes to mFailed between them, suggesting that + // it hasn't determined that they can't alias and is thus missing + // optimization opportunities. So, we may want to flatten EHState + // into this class; this may also make the code simpler. 
+ EHState& mState; + uint32_t mStackLimit; + uint32_t mStackBase; + const uint32_t* mNextWord; + uint32_t mWord; + uint8_t mWordsLeft; + uint8_t mBytesLeft; + bool mFailed; + + enum { + I_ADDSP = 0x00, // 0sxxxxxx (subtract if s) + M_ADDSP = 0x80, + I_POPMASK = 0x80, // 1000iiii iiiiiiii (if any i set) + M_POPMASK = 0xf0, + I_MOVSP = 0x90, // 1001nnnn + M_MOVSP = 0xf0, + I_POPN = 0xa0, // 1010lnnn + M_POPN = 0xf0, + I_FINISH = 0xb0, // 10110000 + I_POPLO = 0xb1, // 10110001 0000iiii (if any i set) + I_ADDSPBIG = 0xb2, // 10110010 uleb128 + I_POPFDX = 0xb3, // 10110011 sssscccc + I_POPFDX8 = 0xb8, // 10111nnn + M_POPFDX8 = 0xf8, + // "Intel Wireless MMX" extensions omitted. + I_POPFDD = 0xc8, // 1100100h sssscccc + M_POPFDD = 0xfe, + I_POPFDD8 = 0xd0, // 11010nnn + M_POPFDD8 = 0xf8 + }; + + uint8_t next() { + if (mBytesLeft == 0) { + if (mWordsLeft == 0) { + return I_FINISH; + } + mWordsLeft--; + mWord = *mNextWord++; + mBytesLeft = 4; + } + mBytesLeft--; + mWord = (mWord << 8) | (mWord >> 24); // rotate + return mWord; + } + + uint32_t& vSP() { return mState[R_SP]; } + uint32_t* ptrSP() { return reinterpret_cast<uint32_t*>(vSP()); } + + void checkStackBase() { + if (vSP() > mStackBase) mFailed = true; + } + void checkStackLimit() { + if (vSP() <= mStackLimit) mFailed = true; + } + void checkStackAlign() { + if ((vSP() & 3) != 0) mFailed = true; + } + void checkStack() { + checkStackBase(); + checkStackLimit(); + checkStackAlign(); + } + + void popRange(uint8_t first, uint8_t last, uint16_t mask) { + bool hasSP = false; + uint32_t tmpSP; + if (mask == 0) mFailed = true; + for (uint8_t r = first; r <= last; ++r) { + if (mask & 1) { + if (r == R_SP) { + hasSP = true; + tmpSP = *ptrSP(); + } else + mState[r] = *ptrSP(); + vSP() += 4; + checkStackBase(); + if (mFailed) return; + } + mask >>= 1; + } + if (hasSP) { + vSP() = tmpSP; + checkStack(); + } + } +}; + +bool EHState::unwind(const EHEntry* aEntry, const void* stackBasePtr) { + // The unwinding program cannot set SP to less than the initial value. + uint32_t stackLimit = mRegs[R_SP] - 4; + uint32_t stackBase = reinterpret_cast<uint32_t>(stackBasePtr); + EHInterp interp(*this, aEntry, stackLimit, stackBase); + return interp.unwind(); +} + +bool EHInterp::unwind() { + mState[R_PC] = 0; + checkStack(); + while (!mFailed) { + uint8_t insn = next(); +#if DEBUG_EHABI_UNWIND + LOG("unwind insn = %02x", (unsigned)insn); +#endif + // Try to put the common cases first. + + // 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4 + // 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4 + if ((insn & M_ADDSP) == I_ADDSP) { + uint32_t offset = ((insn & 0x3f) << 2) + 4; + if (insn & 0x40) { + vSP() -= offset; + checkStackLimit(); + } else { + vSP() += offset; + checkStackBase(); + } + continue; + } + + // 10100nnn: Pop r4-r[4+nnn] + // 10101nnn: Pop r4-r[4+nnn], r14 + if ((insn & M_POPN) == I_POPN) { + uint8_t n = (insn & 0x07) + 1; + bool lr = insn & 0x08; + uint32_t* ptr = ptrSP(); + vSP() += (n + (lr ? 1 : 0)) * 4; + checkStackBase(); + for (uint8_t r = 4; r < 4 + n; ++r) mState[r] = *ptr++; + if (lr) mState[R_LR] = *ptr++; + continue; + } + + // 1011000: Finish + if (insn == I_FINISH) { + if (mState[R_PC] == 0) { + mState[R_PC] = mState[R_LR]; + // Non-standard change (bug 916106): Prevent the caller from + // re-using LR. Since the caller is by definition not a leaf + // routine, it will have to restore LR from somewhere to + // return to its own caller, so we can safely zero it here. 
+ // This makes a difference only if an error in unwinding + // (e.g., caused by starting from within a prologue/epilogue) + // causes us to load a pointer to a leaf routine as LR; if we + // don't do something, we'll go into an infinite loop of + // "returning" to that same function. + mState[R_LR] = 0; + } + return true; + } + + // 1001nnnn: Set vsp = r[nnnn] + if ((insn & M_MOVSP) == I_MOVSP) { + vSP() = mState[insn & 0x0f]; + checkStack(); + continue; + } + + // 11001000 sssscccc: Pop VFP regs D[16+ssss]-D[16+ssss+cccc] (as FLDMFDD) + // 11001001 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDD) + if ((insn & M_POPFDD) == I_POPFDD) { + uint8_t n = (next() & 0x0f) + 1; + // Note: if the 16+ssss+cccc > 31, the encoding is reserved. + // As the space is currently unused, we don't try to check. + vSP() += 8 * n; + checkStackBase(); + continue; + } + + // 11010nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDD) + if ((insn & M_POPFDD8) == I_POPFDD8) { + uint8_t n = (insn & 0x07) + 1; + vSP() += 8 * n; + checkStackBase(); + continue; + } + + // 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2) + if (insn == I_ADDSPBIG) { + uint32_t acc = 0; + uint8_t shift = 0; + uint8_t byte; + do { + if (shift >= 32) return false; + byte = next(); + acc |= (byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + uint32_t offset = 0x204 + (acc << 2); + // The calculations above could have overflowed. + // But the one we care about is this: + if (vSP() + offset < vSP()) mFailed = true; + vSP() += offset; + // ...so that this is the only other check needed: + checkStackBase(); + continue; + } + + // 1000iiii iiiiiiii (i not all 0): Pop under masks {r15-r12}, {r11-r4} + if ((insn & M_POPMASK) == I_POPMASK) { + popRange(4, 15, ((insn & 0x0f) << 8) | next()); + continue; + } + + // 1011001 0000iiii (i not all 0): Pop under mask {r3-r0} + if (insn == I_POPLO) { + popRange(0, 3, next() & 0x0f); + continue; + } + + // 10110011 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDX) + if (insn == I_POPFDX) { + uint8_t n = (next() & 0x0f) + 1; + vSP() += 8 * n + 4; + checkStackBase(); + continue; + } + + // 10111nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDX) + if ((insn & M_POPFDX8) == I_POPFDX8) { + uint8_t n = (insn & 0x07) + 1; + vSP() += 8 * n + 4; + checkStackBase(); + continue; + } + + // unhandled instruction +#ifdef DEBUG_EHABI_UNWIND + LOG("Unhandled EHABI instruction 0x%02x", insn); +#endif + mFailed = true; + } + return false; +} + +bool operator<(const EHTable& lhs, const EHTable& rhs) { + return lhs.startPC() < rhs.startPC(); +} + +// Async signal unsafe. 
+EHAddrSpace::EHAddrSpace(const std::vector<EHTable>& aTables) + : mTables(aTables) { + std::sort(mTables.begin(), mTables.end()); + DebugOnly<uint32_t> lastEnd = 0; + for (std::vector<EHTable>::iterator i = mTables.begin(); i != mTables.end(); + ++i) { + MOZ_ASSERT(i->startPC() >= lastEnd); + mStarts.push_back(i->startPC()); + lastEnd = i->endPC(); + } +} + +const EHTable* EHAddrSpace::lookup(uint32_t aPC) const { + ptrdiff_t i = (std::upper_bound(mStarts.begin(), mStarts.end(), aPC) - + mStarts.begin()) - + 1; + + if (i < 0 || aPC >= mTables[i].endPC()) return 0; + return &mTables[i]; +} + +const EHEntry* EHTable::lookup(uint32_t aPC) const { + MOZ_ASSERT(aPC >= mStartPC); + if (aPC >= mEndPC) return nullptr; + + const EHEntry* begin = mEntriesBegin; + const EHEntry* end = mEntriesEnd; + MOZ_ASSERT(begin < end); + if (aPC < reinterpret_cast<uint32_t>(begin->startPC.compute())) + return nullptr; + + while (end - begin > 1) { +#ifdef EHABI_UNWIND_MORE_ASSERTS + if ((end - 1)->startPC.compute() < begin->startPC.compute()) { + MOZ_CRASH("unsorted exidx"); + } +#endif + const EHEntry* mid = begin + (end - begin) / 2; + if (aPC < reinterpret_cast<uint32_t>(mid->startPC.compute())) + end = mid; + else + begin = mid; + } + return begin; +} + +#if MOZ_LITTLE_ENDIAN() +static const unsigned char hostEndian = ELFDATA2LSB; +#elif MOZ_BIG_ENDIAN() +static const unsigned char hostEndian = ELFDATA2MSB; +#else +# error "No endian?" +#endif + +// Async signal unsafe: std::vector::reserve, std::string copy ctor. +EHTable::EHTable(const void* aELF, size_t aSize, const std::string& aName) + : mStartPC(~0), // largest uint32_t + mEndPC(0), + mEntriesBegin(nullptr), + mEntriesEnd(nullptr), + mName(aName) { + const uint32_t fileHeaderAddr = reinterpret_cast<uint32_t>(aELF); + + if (aSize < sizeof(Elf32_Ehdr)) return; + + const Elf32_Ehdr& file = *(reinterpret_cast<Elf32_Ehdr*>(fileHeaderAddr)); + if (memcmp(&file.e_ident[EI_MAG0], ELFMAG, SELFMAG) != 0 || + file.e_ident[EI_CLASS] != ELFCLASS32 || + file.e_ident[EI_DATA] != hostEndian || + file.e_ident[EI_VERSION] != EV_CURRENT || file.e_machine != EM_ARM || + file.e_version != EV_CURRENT) + // e_flags? + return; + + MOZ_ASSERT(file.e_phoff + file.e_phnum * file.e_phentsize <= aSize); + const Elf32_Phdr *exidxHdr = 0, *zeroHdr = 0; + for (unsigned i = 0; i < file.e_phnum; ++i) { + const Elf32_Phdr& phdr = *(reinterpret_cast<Elf32_Phdr*>( + fileHeaderAddr + file.e_phoff + i * file.e_phentsize)); + if (phdr.p_type == PT_ARM_EXIDX) { + exidxHdr = &phdr; + } else if (phdr.p_type == PT_LOAD) { + if (phdr.p_offset == 0) { + zeroHdr = &phdr; + } + if (phdr.p_flags & PF_X) { + mStartPC = std::min(mStartPC, phdr.p_vaddr); + mEndPC = std::max(mEndPC, phdr.p_vaddr + phdr.p_memsz); + } + } + } + if (!exidxHdr) return; + if (!zeroHdr) return; + mBaseAddress = fileHeaderAddr - zeroHdr->p_vaddr; + mStartPC += mBaseAddress; + mEndPC += mBaseAddress; + mEntriesBegin = + reinterpret_cast<const EHEntry*>(mBaseAddress + exidxHdr->p_vaddr); + mEntriesEnd = reinterpret_cast<const EHEntry*>( + mBaseAddress + exidxHdr->p_vaddr + exidxHdr->p_memsz); +} + +mozilla::Atomic<const EHAddrSpace*> EHAddrSpace::sCurrent(nullptr); + +// Async signal safe; can fail if Update() hasn't returned yet. +const EHAddrSpace* EHAddrSpace::Get() { return sCurrent; } + +// Collect unwinding information from loaded objects. Calls after the +// first have no effect. Async signal unsafe. 
+void EHAddrSpace::Update() { + const EHAddrSpace* space = sCurrent; + if (space) return; + + SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf(); + std::vector<EHTable> tables; + + for (size_t i = 0; i < info.GetSize(); ++i) { + const SharedLibrary& lib = info.GetEntry(i); + // FIXME: This isn't correct if the start address isn't p_offset 0, because + // the start address will not point at the file header. But this is worked + // around by magic number checks in the EHTable constructor. + EHTable tab(reinterpret_cast<const void*>(lib.GetStart()), + lib.GetEnd() - lib.GetStart(), lib.GetNativeDebugPath()); + if (tab.isValid()) tables.push_back(tab); + } + space = new EHAddrSpace(tables); + + if (!sCurrent.compareExchange(nullptr, space)) { + delete space; + space = sCurrent; + } +} + +EHState::EHState(const mcontext_t& context) { +#ifdef linux + mRegs[0] = context.arm_r0; + mRegs[1] = context.arm_r1; + mRegs[2] = context.arm_r2; + mRegs[3] = context.arm_r3; + mRegs[4] = context.arm_r4; + mRegs[5] = context.arm_r5; + mRegs[6] = context.arm_r6; + mRegs[7] = context.arm_r7; + mRegs[8] = context.arm_r8; + mRegs[9] = context.arm_r9; + mRegs[10] = context.arm_r10; + mRegs[11] = context.arm_fp; + mRegs[12] = context.arm_ip; + mRegs[13] = context.arm_sp; + mRegs[14] = context.arm_lr; + mRegs[15] = context.arm_pc; +#else +# error "Unhandled OS for ARM EHABI unwinding" +#endif +} + +} // namespace mozilla diff --git a/tools/profiler/core/EHABIStackWalk.h b/tools/profiler/core/EHABIStackWalk.h new file mode 100644 index 0000000000..61286290b8 --- /dev/null +++ b/tools/profiler/core/EHABIStackWalk.h @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * This is an implementation of stack unwinding according to a subset + * of the ARM Exception Handling ABI; see the comment at the top of + * the .cpp file for details. + */ + +#ifndef mozilla_EHABIStackWalk_h__ +#define mozilla_EHABIStackWalk_h__ + +#include <stddef.h> +#include <ucontext.h> + +namespace mozilla { + +void EHABIStackWalkInit(); + +size_t EHABIStackWalk(const mcontext_t& aContext, void* stackBase, void** aSPs, + void** aPCs, size_t aNumFrames); + +} // namespace mozilla + +#endif diff --git a/tools/profiler/core/PageInformation.cpp b/tools/profiler/core/PageInformation.cpp new file mode 100644 index 0000000000..b4ba33f6be --- /dev/null +++ b/tools/profiler/core/PageInformation.cpp @@ -0,0 +1,41 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "PageInformation.h" + +#include "mozilla/ProfileJSONWriter.h" + +PageInformation::PageInformation(uint64_t aBrowsingContextID, + uint64_t aInnerWindowID, const nsCString& aUrl, + uint64_t aEmbedderInnerWindowID) + : mBrowsingContextID(aBrowsingContextID), + mInnerWindowID(aInnerWindowID), + mUrl(aUrl), + mEmbedderInnerWindowID(aEmbedderInnerWindowID) {} + +bool PageInformation::Equals(PageInformation* aOtherPageInfo) const { + // It's enough to check inner window IDs because they are unique for each + // page. Therefore, we don't have to check browsing context ID or url. + return InnerWindowID() == aOtherPageInfo->InnerWindowID(); +} + +void PageInformation::StreamJSON(SpliceableJSONWriter& aWriter) const { + // Here, we are converting uint64_t to double. Both Browsing Context and Inner + // Window IDs are creating using `nsContentUtils::GenerateProcessSpecificId`, + // which is specifically designed to only use 53 of the 64 bits to be lossless + // when passed into and out of JS as a double. + aWriter.StartObjectElement(); + aWriter.DoubleProperty("browsingContextID", BrowsingContextID()); + aWriter.DoubleProperty("innerWindowID", InnerWindowID()); + aWriter.StringProperty("url", Url()); + aWriter.DoubleProperty("embedderInnerWindowID", EmbedderInnerWindowID()); + aWriter.EndObject(); +} + +size_t PageInformation::SizeOfIncludingThis( + mozilla::MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(this); +} diff --git a/tools/profiler/core/PageInformation.h b/tools/profiler/core/PageInformation.h new file mode 100644 index 0000000000..0f10c6e99d --- /dev/null +++ b/tools/profiler/core/PageInformation.h @@ -0,0 +1,65 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef PageInformation_h +#define PageInformation_h + +#include "mozilla/Maybe.h" +#include "mozilla/MemoryReporting.h" +#include "nsISupportsImpl.h" +#include "nsString.h" + +namespace mozilla { +namespace baseprofiler { +class SpliceableJSONWriter; +} // namespace baseprofiler +} // namespace mozilla + +// This class contains information that's relevant to a single page only +// while the page information is important and registered with the profiler, +// but regardless of whether the profiler is running. All accesses to it are +// protected by the profiler state lock. +// When the page gets unregistered, we keep the profiler buffer position +// to determine if we are still using this page. If not, we unregister +// it in the next page registration. 
+class PageInformation final { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PageInformation) + PageInformation(uint64_t aBrowsingContextID, uint64_t aInnerWindowID, + const nsCString& aUrl, uint64_t aEmbedderInnerWindowID); + + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + bool Equals(PageInformation* aOtherPageInfo) const; + void StreamJSON(mozilla::baseprofiler::SpliceableJSONWriter& aWriter) const; + + uint64_t InnerWindowID() const { return mInnerWindowID; } + uint64_t BrowsingContextID() const { return mBrowsingContextID; } + const nsCString& Url() const { return mUrl; } + uint64_t EmbedderInnerWindowID() const { return mEmbedderInnerWindowID; } + + mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() const { + return mBufferPositionWhenUnregistered; + } + + void NotifyUnregistered(uint64_t aBufferPosition) { + mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition); + } + + private: + const uint64_t mBrowsingContextID; + const uint64_t mInnerWindowID; + const nsCString mUrl; + const uint64_t mEmbedderInnerWindowID; + + // Holds the buffer position when page is unregistered. + // It's used to determine if we still use this page in the profiler or + // not. + mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered; + + virtual ~PageInformation() = default; +}; + +#endif // PageInformation_h diff --git a/tools/profiler/core/PlatformMacros.h b/tools/profiler/core/PlatformMacros.h new file mode 100644 index 0000000000..c72e94c128 --- /dev/null +++ b/tools/profiler/core/PlatformMacros.h @@ -0,0 +1,130 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef PLATFORM_MACROS_H +#define PLATFORM_MACROS_H + +// Define platform selection macros in a consistent way. Don't add anything +// else to this file, so it can remain freestanding. The primary factorisation +// is on (ARCH,OS) pairs ("PLATforms") but ARCH_ and OS_ macros are defined +// too, since they are sometimes convenient. +// +// Note: "GP" is short for "Gecko Profiler". + +#undef GP_PLAT_x86_android +#undef GP_PLAT_amd64_android +#undef GP_PLAT_arm_android +#undef GP_PLAT_arm64_android +#undef GP_PLAT_x86_linux +#undef GP_PLAT_amd64_linux +#undef GP_PLAT_arm_linux +#undef GP_PLAT_mips64_linux +#undef GP_PLAT_amd64_darwin +#undef GP_PLAT_arm64_darwin +#undef GP_PLAT_x86_windows +#undef GP_PLAT_amd64_windows +#undef GP_PLAT_arm64_windows + +#undef GP_ARCH_x86 +#undef GP_ARCH_amd64 +#undef GP_ARCH_arm +#undef GP_ARCH_arm64 +#undef GP_ARCH_mips64 + +#undef GP_OS_android +#undef GP_OS_linux +#undef GP_OS_darwin +#undef GP_OS_windows + +// We test __ANDROID__ before __linux__ because __linux__ is defined on both +// Android and Linux, whereas GP_OS_android is not defined on vanilla Linux. 
+ +#if defined(__ANDROID__) && defined(__i386__) +# define GP_PLAT_x86_android 1 +# define GP_ARCH_x86 1 +# define GP_OS_android 1 + +#elif defined(__ANDROID__) && defined(__x86_64__) +# define GP_PLAT_amd64_android 1 +# define GP_ARCH_amd64 1 +# define GP_OS_android 1 + +#elif defined(__ANDROID__) && defined(__arm__) +# define GP_PLAT_arm_android 1 +# define GP_ARCH_arm 1 +# define GP_OS_android 1 + +#elif defined(__ANDROID__) && defined(__aarch64__) +# define GP_PLAT_arm64_android 1 +# define GP_ARCH_arm64 1 +# define GP_OS_android 1 + +#elif defined(__linux__) && defined(__i386__) +# define GP_PLAT_x86_linux 1 +# define GP_ARCH_x86 1 +# define GP_OS_linux 1 + +#elif defined(__linux__) && defined(__x86_64__) +# define GP_PLAT_amd64_linux 1 +# define GP_ARCH_amd64 1 +# define GP_OS_linux 1 + +#elif defined(__linux__) && defined(__arm__) +# define GP_PLAT_arm_linux 1 +# define GP_ARCH_arm 1 +# define GP_OS_linux 1 + +#elif defined(__linux__) && defined(__aarch64__) +# define GP_PLAT_arm64_linux 1 +# define GP_ARCH_arm64 1 +# define GP_OS_linux 1 + +#elif defined(__linux__) && defined(__mips64) +# define GP_PLAT_mips64_linux 1 +# define GP_ARCH_mips64 1 +# define GP_OS_linux 1 + +#elif defined(__APPLE__) && defined(__aarch64__) +# define GP_PLAT_arm64_darwin 1 +# define GP_ARCH_arm64 1 +# define GP_OS_darwin 1 + +#elif defined(__APPLE__) && defined(__x86_64__) +# define GP_PLAT_amd64_darwin 1 +# define GP_ARCH_amd64 1 +# define GP_OS_darwin 1 + +#elif defined(__FreeBSD__) && defined(__x86_64__) +# define GP_PLAT_amd64_freebsd 1 +# define GP_ARCH_amd64 1 +# define GP_OS_freebsd 1 + +#elif defined(__FreeBSD__) && defined(__aarch64__) +# define GP_PLAT_arm64_freebsd 1 +# define GP_ARCH_arm64 1 +# define GP_OS_freebsd 1 + +#elif (defined(_MSC_VER) || defined(__MINGW32__)) && \ + (defined(_M_IX86) || defined(__i386__)) +# define GP_PLAT_x86_windows 1 +# define GP_ARCH_x86 1 +# define GP_OS_windows 1 + +#elif (defined(_MSC_VER) || defined(__MINGW32__)) && \ + (defined(_M_X64) || defined(__x86_64__)) +# define GP_PLAT_amd64_windows 1 +# define GP_ARCH_amd64 1 +# define GP_OS_windows 1 + +#elif defined(_MSC_VER) && defined(_M_ARM64) +# define GP_PLAT_arm64_windows 1 +# define GP_ARCH_arm64 1 +# define GP_OS_windows 1 + +#else +# error "Unsupported platform" +#endif + +#endif /* ndef PLATFORM_MACROS_H */ diff --git a/tools/profiler/core/ProfileBuffer.cpp b/tools/profiler/core/ProfileBuffer.cpp new file mode 100644 index 0000000000..887e7cc70a --- /dev/null +++ b/tools/profiler/core/ProfileBuffer.cpp @@ -0,0 +1,236 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ProfileBuffer.h" + +#include "BaseProfiler.h" +#include "js/GCAPI.h" +#include "jsfriendapi.h" +#include "mozilla/MathAlgorithms.h" +#include "nsJSPrincipals.h" +#include "nsScriptSecurityManager.h" + +using namespace mozilla; + +ProfileBuffer::ProfileBuffer(ProfileChunkedBuffer& aBuffer) + : mEntries(aBuffer) { + // Assume the given buffer is in-session. 
+ MOZ_ASSERT(mEntries.IsInSession()); +} + +/* static */ +ProfileBufferBlockIndex ProfileBuffer::AddEntry( + ProfileChunkedBuffer& aProfileChunkedBuffer, + const ProfileBufferEntry& aEntry) { + switch (aEntry.GetKind()) { +#define SWITCH_KIND(KIND, TYPE, SIZE) \ + case ProfileBufferEntry::Kind::KIND: { \ + return aProfileChunkedBuffer.PutFrom(&aEntry, 1 + (SIZE)); \ + } + + FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND) + +#undef SWITCH_KIND + default: + MOZ_ASSERT(false, "Unhandled ProfilerBuffer entry KIND"); + return ProfileBufferBlockIndex{}; + } +} + +// Called from signal, call only reentrant functions +uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) { + return AddEntry(mEntries, aEntry).ConvertToProfileBufferIndex(); +} + +/* static */ +ProfileBufferBlockIndex ProfileBuffer::AddThreadIdEntry( + ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId) { + return AddEntry(aProfileChunkedBuffer, + ProfileBufferEntry::ThreadId(aThreadId)); +} + +uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) { + return AddThreadIdEntry(mEntries, aThreadId).ConvertToProfileBufferIndex(); +} + +void ProfileBuffer::CollectCodeLocation( + const char* aLabel, const char* aStr, uint32_t aFrameFlags, + uint64_t aInnerWindowID, const Maybe<uint32_t>& aLineNumber, + const Maybe<uint32_t>& aColumnNumber, + const Maybe<JS::ProfilingCategoryPair>& aCategoryPair) { + AddEntry(ProfileBufferEntry::Label(aLabel)); + AddEntry(ProfileBufferEntry::FrameFlags(uint64_t(aFrameFlags))); + + if (aStr) { + // Store the string using one or more DynamicStringFragment entries. + size_t strLen = strlen(aStr) + 1; // +1 for the null terminator + // If larger than the prescribed limit, we will cut the string and end it + // with an ellipsis. + const bool tooBig = strLen > kMaxFrameKeyLength; + if (tooBig) { + strLen = kMaxFrameKeyLength; + } + char chars[ProfileBufferEntry::kNumChars]; + for (size_t j = 0;; j += ProfileBufferEntry::kNumChars) { + // Store up to kNumChars characters in the entry. + size_t len = ProfileBufferEntry::kNumChars; + const bool last = j + len >= strLen; + if (last) { + // Only the last entry may be smaller than kNumChars. + len = strLen - j; + if (tooBig) { + // That last entry is part of a too-big string, replace the end + // characters with an ellipsis "...". + len = std::max(len, size_t(4)); + chars[len - 4] = '.'; + chars[len - 3] = '.'; + chars[len - 2] = '.'; + chars[len - 1] = '\0'; + // Make sure the memcpy will not overwrite our ellipsis! 
+ len -= 4; + } + } + memcpy(chars, &aStr[j], len); + AddEntry(ProfileBufferEntry::DynamicStringFragment(chars)); + if (last) { + break; + } + } + } + + if (aInnerWindowID) { + AddEntry(ProfileBufferEntry::InnerWindowID(aInnerWindowID)); + } + + if (aLineNumber) { + AddEntry(ProfileBufferEntry::LineNumber(*aLineNumber)); + } + + if (aColumnNumber) { + AddEntry(ProfileBufferEntry::ColumnNumber(*aColumnNumber)); + } + + if (aCategoryPair.isSome()) { + AddEntry(ProfileBufferEntry::CategoryPair(int(*aCategoryPair))); + } +} + +size_t ProfileBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + // Measurement of the following members may be added later if DMD finds it + // is worthwhile: + // - memory pointed to by the elements within mEntries + return mEntries.SizeOfExcludingThis(aMallocSizeOf); +} + +size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +void ProfileBuffer::CollectOverheadStats(double aSamplingTimeMs, + TimeDuration aLocking, + TimeDuration aCleaning, + TimeDuration aCounters, + TimeDuration aThreads) { + double timeUs = aSamplingTimeMs * 1000.0; + if (mFirstSamplingTimeUs == 0.0) { + mFirstSamplingTimeUs = timeUs; + } else { + // Note that we'll have 1 fewer interval than other numbers (because + // we need both ends of an interval to know its duration). The final + // difference should be insignificant over the expected many thousands + // of iterations. + mIntervalsUs.Count(timeUs - mLastSamplingTimeUs); + } + mLastSamplingTimeUs = timeUs; + double locking = aLocking.ToMilliseconds() * 1000.0; + double cleaning = aCleaning.ToMilliseconds() * 1000.0; + double counters = aCounters.ToMilliseconds() * 1000.0; + double threads = aThreads.ToMilliseconds() * 1000.0; + + mOverheadsUs.Count(locking + cleaning + counters + threads); + mLockingsUs.Count(locking); + mCleaningsUs.Count(cleaning); + mCountersUs.Count(counters); + mThreadsUs.Count(threads); + + AddEntry(ProfileBufferEntry::ProfilerOverheadTime(aSamplingTimeMs)); + AddEntry(ProfileBufferEntry::ProfilerOverheadDuration(locking)); + AddEntry(ProfileBufferEntry::ProfilerOverheadDuration(cleaning)); + AddEntry(ProfileBufferEntry::ProfilerOverheadDuration(counters)); + AddEntry(ProfileBufferEntry::ProfilerOverheadDuration(threads)); +} + +ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const { + return {BufferRangeStart(), + BufferRangeEnd(), + static_cast<uint32_t>(*mEntries.BufferLength() / + 8), // 8 bytes per entry. + mIntervalsUs, + mOverheadsUs, + mLockingsUs, + mCleaningsUs, + mCountersUs, + mThreadsUs}; +} + +/* ProfileBufferCollector */ + +void ProfileBufferCollector::CollectNativeLeafAddr(void* aAddr) { + mBuf.AddEntry(ProfileBufferEntry::NativeLeafAddr(aAddr)); +} + +void ProfileBufferCollector::CollectJitReturnAddr(void* aAddr) { + mBuf.AddEntry(ProfileBufferEntry::JitReturnAddr(aAddr)); +} + +void ProfileBufferCollector::CollectWasmFrame(const char* aLabel) { + mBuf.CollectCodeLocation("", aLabel, 0, 0, Nothing(), Nothing(), Nothing()); +} + +void ProfileBufferCollector::CollectProfilingStackFrame( + const js::ProfilingStackFrame& aFrame) { + // WARNING: this function runs within the profiler's "critical section". 
+ + MOZ_ASSERT(aFrame.isLabelFrame() || + (aFrame.isJsFrame() && !aFrame.isOSRFrame())); + + const char* label = aFrame.label(); + const char* dynamicString = aFrame.dynamicString(); + Maybe<uint32_t> line; + Maybe<uint32_t> column; + + if (aFrame.isJsFrame()) { + // There are two kinds of JS frames that get pushed onto the ProfilingStack. + // + // - label = "", dynamic string = <something> + // - label = "js::RunScript", dynamic string = nullptr + // + // The line number is only interesting in the first case. + + if (label[0] == '\0') { + MOZ_ASSERT(dynamicString); + + // We call aFrame.script() repeatedly -- rather than storing the result in + // a local variable in order -- to avoid rooting hazards. + if (aFrame.script()) { + if (aFrame.pc()) { + unsigned col = 0; + line = Some(JS_PCToLineNumber(aFrame.script(), aFrame.pc(), &col)); + column = Some(col); + } + } + + } else { + MOZ_ASSERT(strcmp(label, "js::RunScript") == 0 && !dynamicString); + } + } else { + MOZ_ASSERT(aFrame.isLabelFrame()); + } + + mBuf.CollectCodeLocation(label, dynamicString, aFrame.flags(), + aFrame.realmID(), line, column, + Some(aFrame.categoryPair())); +} diff --git a/tools/profiler/core/ProfileBuffer.h b/tools/profiler/core/ProfileBuffer.h new file mode 100644 index 0000000000..e172577566 --- /dev/null +++ b/tools/profiler/core/ProfileBuffer.h @@ -0,0 +1,215 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZ_PROFILE_BUFFER_H +#define MOZ_PROFILE_BUFFER_H + +#include "GeckoProfiler.h" +#include "ProfileBufferEntry.h" + +#include "mozilla/Maybe.h" +#include "mozilla/PowerOfTwo.h" +#include "mozilla/ProfileBufferChunkManagerSingle.h" +#include "mozilla/ProfileChunkedBuffer.h" + +class RunningTimes; + +// Class storing most profiling data in a ProfileChunkedBuffer. +// +// This class is used as a queue of entries which, after construction, never +// allocates. This makes it safe to use in the profiler's "critical section". +class ProfileBuffer final { + public: + // ProfileBuffer constructor + // @param aBuffer The in-session ProfileChunkedBuffer to use as buffer + // manager. + explicit ProfileBuffer(mozilla::ProfileChunkedBuffer& aBuffer); + + mozilla::ProfileChunkedBuffer& UnderlyingChunkedBuffer() const { + return mEntries; + } + + bool IsThreadSafe() const { return mEntries.IsThreadSafe(); } + + // Add |aEntry| to the buffer, ignoring what kind of entry it is. + uint64_t AddEntry(const ProfileBufferEntry& aEntry); + + // Add to the buffer a sample start (ThreadId) entry for aThreadId. + // Returns the position of the entry. + uint64_t AddThreadIdEntry(int aThreadId); + + void CollectCodeLocation( + const char* aLabel, const char* aStr, uint32_t aFrameFlags, + uint64_t aInnerWindowID, const mozilla::Maybe<uint32_t>& aLineNumber, + const mozilla::Maybe<uint32_t>& aColumnNumber, + const mozilla::Maybe<JS::ProfilingCategoryPair>& aCategoryPair); + + // Maximum size of a frameKey string that we'll handle. + static const size_t kMaxFrameKeyLength = 512; + + // Add JIT frame information to aJITFrameInfo for any JitReturnAddr entries + // that are currently in the buffer at or after aRangeStart, in samples + // for the given thread. 
+ void AddJITInfoForRange(uint64_t aRangeStart, int aThreadId, + JSContext* aContext, + JITFrameInfo& aJITFrameInfo) const; + + // Stream JSON for samples in the buffer to aWriter, using the supplied + // UniqueStacks object. + // Only streams samples for the given thread ID and which were taken at or + // after aSinceTime. If ID is 0, ignore the stored thread ID; this should only + // be used when the buffer contains only one sample. + // aUniqueStacks needs to contain information about any JIT frames that we + // might encounter in the buffer, before this method is called. In other + // words, you need to have called AddJITInfoForRange for every range that + // might contain JIT frame information before calling this method. + // Return the thread ID of the streamed sample(s), or 0. + int StreamSamplesToJSON(SpliceableJSONWriter& aWriter, int aThreadId, + double aSinceTime, UniqueStacks& aUniqueStacks) const; + + void StreamMarkersToJSON(SpliceableJSONWriter& aWriter, int aThreadId, + const mozilla::TimeStamp& aProcessStartTime, + double aSinceTime, + UniqueStacks& aUniqueStacks) const; + void StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter, + double aSinceTime) const; + void StreamProfilerOverheadToJSON(SpliceableJSONWriter& aWriter, + const mozilla::TimeStamp& aProcessStartTime, + double aSinceTime) const; + void StreamCountersToJSON(SpliceableJSONWriter& aWriter, + const mozilla::TimeStamp& aProcessStartTime, + double aSinceTime) const; + + // Find (via |aLastSample|) the most recent sample for the thread denoted by + // |aThreadId| and clone it, patching in the current time as appropriate. + // Mutate |aLastSample| to point to the newly inserted sample. + // Returns whether duplication was successful. + bool DuplicateLastSample(int aThreadId, double aSampleTimeMs, + mozilla::Maybe<uint64_t>& aLastSample, + const RunningTimes& aRunningTimes); + + void DiscardSamplesBeforeTime(double aTime); + + // Read an entry in the buffer. + ProfileBufferEntry GetEntry(uint64_t aPosition) const { + return mEntries.ReadAt( + mozilla::ProfileBufferBlockIndex::CreateFromProfileBufferIndex( + aPosition), + [&](mozilla::Maybe<mozilla::ProfileBufferEntryReader>&& aMER) { + ProfileBufferEntry entry; + if (aMER.isSome()) { + if (aMER->CurrentBlockIndex().ConvertToProfileBufferIndex() == + aPosition) { + // If we're here, it means `aPosition` pointed at a valid block. + MOZ_RELEASE_ASSERT(aMER->RemainingBytes() <= sizeof(entry)); + aMER->ReadBytes(&entry, aMER->RemainingBytes()); + } else { + // EntryReader at the wrong position, pretend to have read + // everything. + aMER->SetRemainingBytes(0); + } + } + return entry; + }); + } + + size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + void CollectOverheadStats(double aSamplingTimeMs, + mozilla::TimeDuration aLocking, + mozilla::TimeDuration aCleaning, + mozilla::TimeDuration aCounters, + mozilla::TimeDuration aThreads); + + ProfilerBufferInfo GetProfilerBufferInfo() const; + + private: + // Add |aEntry| to the provided ProfileChunkedBuffer. + // `static` because it may be used to add an entry to a `ProfileChunkedBuffer` + // that is not attached to a `ProfileBuffer`. + static mozilla::ProfileBufferBlockIndex AddEntry( + mozilla::ProfileChunkedBuffer& aProfileChunkedBuffer, + const ProfileBufferEntry& aEntry); + + // Add a sample start (ThreadId) entry for aThreadId to the provided + // ProfileChunkedBuffer. Returns the position of the entry. 
+ // `static` because it may be used to add an entry to a `ProfileChunkedBuffer` + // that is not attached to a `ProfileBuffer`. + static mozilla::ProfileBufferBlockIndex AddThreadIdEntry( + mozilla::ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId); + + // The storage in which this ProfileBuffer stores its entries. + mozilla::ProfileChunkedBuffer& mEntries; + + public: + // `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values + // corresponding to the first entry and past the last entry stored in + // `mEntries`. + // + // The returned values are not guaranteed to be stable, because other threads + // may also be accessing the buffer concurrently. But they will always + // increase, and can therefore give an indication of how far these values have + // *at least* reached. In particular: + // - Entries whose index is strictly less that `BufferRangeStart()` have been + // discarded by now, so any related data may also be safely discarded. + // - It is safe to try and read entries at any index strictly less than + // `BufferRangeEnd()` -- but note that these reads may fail by the time you + // request them, as old entries get overwritten by new ones. + uint64_t BufferRangeStart() const { return mEntries.GetState().mRangeStart; } + uint64_t BufferRangeEnd() const { return mEntries.GetState().mRangeEnd; } + + private: + // Single pre-allocated chunk (to avoid spurious mallocs), used when: + // - Duplicating sleeping stacks (hence scExpectedMaximumStackSize). + // - Adding JIT info. + // - Streaming stacks to JSON. + // Mutable because it's accessed from non-multithreaded const methods. + mutable mozilla::ProfileBufferChunkManagerSingle mWorkerChunkManager{ + mozilla::ProfileBufferChunk::Create( + mozilla::ProfileBufferChunk::SizeofChunkMetadata() + + mozilla::ProfileBufferChunkManager::scExpectedMaximumStackSize)}; + + double mFirstSamplingTimeUs = 0.0; + double mLastSamplingTimeUs = 0.0; + ProfilerStats mIntervalsUs; + ProfilerStats mOverheadsUs; + ProfilerStats mLockingsUs; + ProfilerStats mCleaningsUs; + ProfilerStats mCountersUs; + ProfilerStats mThreadsUs; +}; + +/** + * Helper type used to implement ProfilerStackCollector. This type is used as + * the collector for MergeStacks by ProfileBuffer. 
It holds a reference to the + * buffer, as well as additional feature flags which are needed to control the + * data collection strategy + */ +class ProfileBufferCollector final : public ProfilerStackCollector { + public: + ProfileBufferCollector(ProfileBuffer& aBuf, uint64_t aSamplePos) + : mBuf(aBuf), mSamplePositionInBuffer(aSamplePos) {} + + mozilla::Maybe<uint64_t> SamplePositionInBuffer() override { + return mozilla::Some(mSamplePositionInBuffer); + } + + mozilla::Maybe<uint64_t> BufferRangeStart() override { + return mozilla::Some(mBuf.BufferRangeStart()); + } + + virtual void CollectNativeLeafAddr(void* aAddr) override; + virtual void CollectJitReturnAddr(void* aAddr) override; + virtual void CollectWasmFrame(const char* aLabel) override; + virtual void CollectProfilingStackFrame( + const js::ProfilingStackFrame& aFrame) override; + + private: + ProfileBuffer& mBuf; + uint64_t mSamplePositionInBuffer; +}; + +#endif diff --git a/tools/profiler/core/ProfileBufferEntry.cpp b/tools/profiler/core/ProfileBufferEntry.cpp new file mode 100644 index 0000000000..000f9dfbd7 --- /dev/null +++ b/tools/profiler/core/ProfileBufferEntry.cpp @@ -0,0 +1,1778 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ProfileBufferEntry.h" + +#include "mozilla/ProfilerMarkers.h" +#include "platform.h" +#include "ProfileBuffer.h" +#include "ProfilerBacktrace.h" + +#include "js/ProfilingFrameIterator.h" +#include "jsapi.h" +#include "jsfriendapi.h" +#include "mozilla/Logging.h" +#include "mozilla/ScopeExit.h" +#include "mozilla/Sprintf.h" +#include "mozilla/StackWalk.h" +#include "nsThreadUtils.h" +#include "nsXULAppAPI.h" +#include "ProfilerCodeAddressService.h" + +#include <ostream> +#include <type_traits> + +using namespace mozilla; + +//////////////////////////////////////////////////////////////////////// +// BEGIN ProfileBufferEntry + +ProfileBufferEntry::ProfileBufferEntry() + : mKind(Kind::INVALID), mStorage{0, 0, 0, 0, 0, 0, 0, 0} {} + +// aString must be a static string. 
+ProfileBufferEntry::ProfileBufferEntry(Kind aKind, const char* aString) + : mKind(aKind) { + MOZ_ASSERT(aKind == Kind::Label); + memcpy(mStorage, &aString, sizeof(aString)); +} + +ProfileBufferEntry::ProfileBufferEntry(Kind aKind, char aChars[kNumChars]) + : mKind(aKind) { + MOZ_ASSERT(aKind == Kind::DynamicStringFragment); + memcpy(mStorage, aChars, kNumChars); +} + +ProfileBufferEntry::ProfileBufferEntry(Kind aKind, void* aPtr) : mKind(aKind) { + memcpy(mStorage, &aPtr, sizeof(aPtr)); +} + +ProfileBufferEntry::ProfileBufferEntry(Kind aKind, double aDouble) + : mKind(aKind) { + memcpy(mStorage, &aDouble, sizeof(aDouble)); +} + +ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int aInt) : mKind(aKind) { + memcpy(mStorage, &aInt, sizeof(aInt)); +} + +ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int64_t aInt64) + : mKind(aKind) { + memcpy(mStorage, &aInt64, sizeof(aInt64)); +} + +ProfileBufferEntry::ProfileBufferEntry(Kind aKind, uint64_t aUint64) + : mKind(aKind) { + memcpy(mStorage, &aUint64, sizeof(aUint64)); +} + +const char* ProfileBufferEntry::GetString() const { + const char* result; + memcpy(&result, mStorage, sizeof(result)); + return result; +} + +void* ProfileBufferEntry::GetPtr() const { + void* result; + memcpy(&result, mStorage, sizeof(result)); + return result; +} + +double ProfileBufferEntry::GetDouble() const { + double result; + memcpy(&result, mStorage, sizeof(result)); + return result; +} + +int ProfileBufferEntry::GetInt() const { + int result; + memcpy(&result, mStorage, sizeof(result)); + return result; +} + +int64_t ProfileBufferEntry::GetInt64() const { + int64_t result; + memcpy(&result, mStorage, sizeof(result)); + return result; +} + +uint64_t ProfileBufferEntry::GetUint64() const { + uint64_t result; + memcpy(&result, mStorage, sizeof(result)); + return result; +} + +void ProfileBufferEntry::CopyCharsInto(char (&aOutArray)[kNumChars]) const { + memcpy(aOutArray, mStorage, kNumChars); +} + +// END ProfileBufferEntry +//////////////////////////////////////////////////////////////////////// + +struct TypeInfo { + Maybe<nsCString> mKeyedBy; + Maybe<nsCString> mName; + Maybe<nsCString> mLocation; + Maybe<unsigned> mLineNumber; +}; + +// As mentioned in ProfileBufferEntry.h, the JSON format contains many +// arrays whose elements are laid out according to various schemas to help +// de-duplication. This RAII class helps write these arrays by keeping track of +// the last non-null element written and adding the appropriate number of null +// elements when writing new non-null elements. It also automatically opens and +// closes an array element on the given JSON writer. +// +// You grant the AutoArraySchemaWriter exclusive access to the JSONWriter and +// the UniqueJSONStrings objects for the lifetime of AutoArraySchemaWriter. Do +// not access them independently while the AutoArraySchemaWriter is alive. +// If you need to add complex objects, call FreeFormElement(), which will give +// you temporary access to the writer. +// +// Example usage: +// +// // Define the schema of elements in this type of array: [FOO, BAR, BAZ] +// enum Schema : uint32_t { +// FOO = 0, +// BAR = 1, +// BAZ = 2 +// }; +// +// AutoArraySchemaWriter writer(someJsonWriter, someUniqueStrings); +// if (shouldWriteFoo) { +// writer.IntElement(FOO, getFoo()); +// } +// ... etc ... +// +// The elements need to be added in-order. 
+class MOZ_RAII AutoArraySchemaWriter { + public: + explicit AutoArraySchemaWriter(SpliceableJSONWriter& aWriter) + : mJSONWriter(aWriter), mNextFreeIndex(0) { + mJSONWriter.StartArrayElement(SpliceableJSONWriter::SingleLineStyle); + } + + ~AutoArraySchemaWriter() { mJSONWriter.EndArray(); } + + template <typename T> + void IntElement(uint32_t aIndex, T aValue) { + static_assert(!std::is_same_v<T, uint64_t>, + "Narrowing uint64 -> int64 conversion not allowed"); + FillUpTo(aIndex); + mJSONWriter.IntElement(static_cast<int64_t>(aValue)); + } + + void DoubleElement(uint32_t aIndex, double aValue) { + FillUpTo(aIndex); + mJSONWriter.DoubleElement(aValue); + } + + void BoolElement(uint32_t aIndex, bool aValue) { + FillUpTo(aIndex); + mJSONWriter.BoolElement(aValue); + } + + protected: + SpliceableJSONWriter& Writer() { return mJSONWriter; } + + void FillUpTo(uint32_t aIndex) { + MOZ_ASSERT(aIndex >= mNextFreeIndex); + mJSONWriter.NullElements(aIndex - mNextFreeIndex); + mNextFreeIndex = aIndex + 1; + } + + private: + SpliceableJSONWriter& mJSONWriter; + uint32_t mNextFreeIndex; +}; + +// Same as AutoArraySchemaWriter, but this can also write strings (output as +// indexes into the table of unique strings). +class MOZ_RAII AutoArraySchemaWithStringsWriter : public AutoArraySchemaWriter { + public: + AutoArraySchemaWithStringsWriter(SpliceableJSONWriter& aWriter, + UniqueJSONStrings& aStrings) + : AutoArraySchemaWriter(aWriter), mStrings(aStrings) {} + + void StringElement(uint32_t aIndex, const Span<const char>& aValue) { + FillUpTo(aIndex); + mStrings.WriteElement(Writer(), aValue); + } + + private: + UniqueJSONStrings& mStrings; +}; + +UniqueStacks::StackKey UniqueStacks::BeginStack(const FrameKey& aFrame) { + return StackKey(GetOrAddFrameIndex(aFrame)); +} + +UniqueStacks::StackKey UniqueStacks::AppendFrame(const StackKey& aStack, + const FrameKey& aFrame) { + return StackKey(aStack, GetOrAddStackIndex(aStack), + GetOrAddFrameIndex(aFrame)); +} + +JITFrameInfoForBufferRange JITFrameInfoForBufferRange::Clone() const { + JITFrameInfoForBufferRange::JITAddressToJITFramesMap jitAddressToJITFramesMap; + MOZ_RELEASE_ASSERT( + jitAddressToJITFramesMap.reserve(mJITAddressToJITFramesMap.count())); + for (auto iter = mJITAddressToJITFramesMap.iter(); !iter.done(); + iter.next()) { + const mozilla::Vector<JITFrameKey>& srcKeys = iter.get().value(); + mozilla::Vector<JITFrameKey> destKeys; + MOZ_RELEASE_ASSERT(destKeys.appendAll(srcKeys)); + jitAddressToJITFramesMap.putNewInfallible(iter.get().key(), + std::move(destKeys)); + } + + JITFrameInfoForBufferRange::JITFrameToFrameJSONMap jitFrameToFrameJSONMap; + MOZ_RELEASE_ASSERT( + jitFrameToFrameJSONMap.reserve(mJITFrameToFrameJSONMap.count())); + for (auto iter = mJITFrameToFrameJSONMap.iter(); !iter.done(); iter.next()) { + jitFrameToFrameJSONMap.putNewInfallible(iter.get().key(), + iter.get().value()); + } + + return JITFrameInfoForBufferRange{mRangeStart, mRangeEnd, + std::move(jitAddressToJITFramesMap), + std::move(jitFrameToFrameJSONMap)}; +} + +JITFrameInfo::JITFrameInfo(const JITFrameInfo& aOther) + : mUniqueStrings(MakeUnique<UniqueJSONStrings>(*aOther.mUniqueStrings)) { + for (const JITFrameInfoForBufferRange& range : aOther.mRanges) { + MOZ_RELEASE_ASSERT(mRanges.append(range.Clone())); + } +} + +bool UniqueStacks::FrameKey::NormalFrameData::operator==( + const NormalFrameData& aOther) const { + return mLocation == aOther.mLocation && + mRelevantForJS == aOther.mRelevantForJS && + mBaselineInterp == aOther.mBaselineInterp && + mInnerWindowID == 
aOther.mInnerWindowID && mLine == aOther.mLine && + mColumn == aOther.mColumn && mCategoryPair == aOther.mCategoryPair; +} + +bool UniqueStacks::FrameKey::JITFrameData::operator==( + const JITFrameData& aOther) const { + return mCanonicalAddress == aOther.mCanonicalAddress && + mDepth == aOther.mDepth && mRangeIndex == aOther.mRangeIndex; +} + +// Consume aJITFrameInfo by stealing its string table and its JIT frame info +// ranges. The JIT frame info contains JSON which refers to strings from the +// JIT frame info's string table, so our string table needs to have the same +// strings at the same indices. +UniqueStacks::UniqueStacks(JITFrameInfo&& aJITFrameInfo) + : mUniqueStrings(std::move(aJITFrameInfo.mUniqueStrings)), + mJITInfoRanges(std::move(aJITFrameInfo.mRanges)) { + mFrameTableWriter.StartBareList(); + mStackTableWriter.StartBareList(); +} + +uint32_t UniqueStacks::GetOrAddStackIndex(const StackKey& aStack) { + uint32_t count = mStackToIndexMap.count(); + auto entry = mStackToIndexMap.lookupForAdd(aStack); + if (entry) { + MOZ_ASSERT(entry->value() < count); + return entry->value(); + } + + MOZ_RELEASE_ASSERT(mStackToIndexMap.add(entry, aStack, count)); + StreamStack(aStack); + return count; +} + +Maybe<Vector<UniqueStacks::FrameKey>> +UniqueStacks::LookupFramesForJITAddressFromBufferPos(void* aJITAddress, + uint64_t aBufferPos) { + JITFrameInfoForBufferRange* rangeIter = + std::lower_bound(mJITInfoRanges.begin(), mJITInfoRanges.end(), aBufferPos, + [](const JITFrameInfoForBufferRange& aRange, + uint64_t aPos) { return aRange.mRangeEnd < aPos; }); + MOZ_RELEASE_ASSERT( + rangeIter != mJITInfoRanges.end() && + rangeIter->mRangeStart <= aBufferPos && + aBufferPos < rangeIter->mRangeEnd, + "Buffer position of jit address needs to be in one of the ranges"); + + using JITFrameKey = JITFrameInfoForBufferRange::JITFrameKey; + + const JITFrameInfoForBufferRange& jitFrameInfoRange = *rangeIter; + auto jitFrameKeys = + jitFrameInfoRange.mJITAddressToJITFramesMap.lookup(aJITAddress); + if (!jitFrameKeys) { + return Nothing(); + } + + // Map the array of JITFrameKeys to an array of FrameKeys, and ensure that + // each of the FrameKeys exists in mFrameToIndexMap. + Vector<FrameKey> frameKeys; + MOZ_RELEASE_ASSERT(frameKeys.initCapacity(jitFrameKeys->value().length())); + for (const JITFrameKey& jitFrameKey : jitFrameKeys->value()) { + FrameKey frameKey(jitFrameKey.mCanonicalAddress, jitFrameKey.mDepth, + rangeIter - mJITInfoRanges.begin()); + uint32_t index = mFrameToIndexMap.count(); + auto entry = mFrameToIndexMap.lookupForAdd(frameKey); + if (!entry) { + // We need to add this frame to our frame table. The JSON for this frame + // already exists in jitFrameInfoRange, we just need to splice it into + // the frame table and give it an index. 
+ auto frameJSON = + jitFrameInfoRange.mJITFrameToFrameJSONMap.lookup(jitFrameKey); + MOZ_RELEASE_ASSERT(frameJSON, "Should have cached JSON for this frame"); + mFrameTableWriter.Splice(frameJSON->value()); + MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, frameKey, index)); + } + MOZ_RELEASE_ASSERT(frameKeys.append(std::move(frameKey))); + } + return Some(std::move(frameKeys)); +} + +uint32_t UniqueStacks::GetOrAddFrameIndex(const FrameKey& aFrame) { + uint32_t count = mFrameToIndexMap.count(); + auto entry = mFrameToIndexMap.lookupForAdd(aFrame); + if (entry) { + MOZ_ASSERT(entry->value() < count); + return entry->value(); + } + + MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, aFrame, count)); + StreamNonJITFrame(aFrame); + return count; +} + +void UniqueStacks::SpliceFrameTableElements(SpliceableJSONWriter& aWriter) { + mFrameTableWriter.EndBareList(); + aWriter.TakeAndSplice(mFrameTableWriter.TakeChunkedWriteFunc()); +} + +void UniqueStacks::SpliceStackTableElements(SpliceableJSONWriter& aWriter) { + mStackTableWriter.EndBareList(); + aWriter.TakeAndSplice(mStackTableWriter.TakeChunkedWriteFunc()); +} + +void UniqueStacks::StreamStack(const StackKey& aStack) { + enum Schema : uint32_t { PREFIX = 0, FRAME = 1 }; + + AutoArraySchemaWriter writer(mStackTableWriter); + if (aStack.mPrefixStackIndex.isSome()) { + writer.IntElement(PREFIX, *aStack.mPrefixStackIndex); + } + writer.IntElement(FRAME, aStack.mFrameIndex); +} + +void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) { + using NormalFrameData = FrameKey::NormalFrameData; + + enum Schema : uint32_t { + LOCATION = 0, + RELEVANT_FOR_JS = 1, + INNER_WINDOW_ID = 2, + IMPLEMENTATION = 3, + OPTIMIZATIONS = 4, + LINE = 5, + COLUMN = 6, + CATEGORY = 7, + SUBCATEGORY = 8 + }; + + AutoArraySchemaWithStringsWriter writer(mFrameTableWriter, *mUniqueStrings); + + const NormalFrameData& data = aFrame.mData.as<NormalFrameData>(); + writer.StringElement(LOCATION, data.mLocation); + writer.BoolElement(RELEVANT_FOR_JS, data.mRelevantForJS); + + // It's okay to convert uint64_t to double here because DOM always creates IDs + // that are convertible to double. + writer.DoubleElement(INNER_WINDOW_ID, data.mInnerWindowID); + + // The C++ interpreter is the default implementation so we only emit element + // for Baseline Interpreter frames. + if (data.mBaselineInterp) { + writer.StringElement(IMPLEMENTATION, MakeStringSpan("blinterp")); + } + + if (data.mLine.isSome()) { + writer.IntElement(LINE, *data.mLine); + } + if (data.mColumn.isSome()) { + writer.IntElement(COLUMN, *data.mColumn); + } + if (data.mCategoryPair.isSome()) { + const JS::ProfilingCategoryPairInfo& info = + JS::GetProfilingCategoryPairInfo(*data.mCategoryPair); + writer.IntElement(CATEGORY, uint32_t(info.mCategory)); + writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex); + } +} + +static void StreamJITFrame(JSContext* aContext, SpliceableJSONWriter& aWriter, + UniqueJSONStrings& aUniqueStrings, + const JS::ProfiledFrameHandle& aJITFrame) { + enum Schema : uint32_t { + LOCATION = 0, + RELEVANT_FOR_JS = 1, + INNER_WINDOW_ID = 2, + IMPLEMENTATION = 3, + OPTIMIZATIONS = 4, + LINE = 5, + COLUMN = 6, + CATEGORY = 7, + SUBCATEGORY = 8 + }; + + AutoArraySchemaWithStringsWriter writer(aWriter, aUniqueStrings); + + writer.StringElement(LOCATION, MakeStringSpan(aJITFrame.label())); + writer.BoolElement(RELEVANT_FOR_JS, false); + + // It's okay to convert uint64_t to double here because DOM always creates IDs + // that are convertible to double. 
+ // Realm ID is the name of innerWindowID inside JS code. + writer.DoubleElement(INNER_WINDOW_ID, aJITFrame.realmID()); + + JS::ProfilingFrameIterator::FrameKind frameKind = aJITFrame.frameKind(); + MOZ_ASSERT(frameKind == JS::ProfilingFrameIterator::Frame_Ion || + frameKind == JS::ProfilingFrameIterator::Frame_Baseline); + writer.StringElement(IMPLEMENTATION, + frameKind == JS::ProfilingFrameIterator::Frame_Ion + ? MakeStringSpan("ion") + : MakeStringSpan("baseline")); + + const JS::ProfilingCategoryPairInfo& info = JS::GetProfilingCategoryPairInfo( + frameKind == JS::ProfilingFrameIterator::Frame_Ion + ? JS::ProfilingCategoryPair::JS_IonMonkey + : JS::ProfilingCategoryPair::JS_Baseline); + writer.IntElement(CATEGORY, uint32_t(info.mCategory)); + writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex); +} + +struct CStringWriteFunc : public JSONWriteFunc { + nsACString& mBuffer; // The struct must not outlive this buffer + explicit CStringWriteFunc(nsACString& aBuffer) : mBuffer(aBuffer) {} + + void Write(const Span<const char>& aStr) override { mBuffer.Append(aStr); } +}; + +static nsCString JSONForJITFrame(JSContext* aContext, + const JS::ProfiledFrameHandle& aJITFrame, + UniqueJSONStrings& aUniqueStrings) { + nsCString json; + SpliceableJSONWriter writer(MakeUnique<CStringWriteFunc>(json)); + StreamJITFrame(aContext, writer, aUniqueStrings, aJITFrame); + return json; +} + +void JITFrameInfo::AddInfoForRange( + uint64_t aRangeStart, uint64_t aRangeEnd, JSContext* aCx, + const std::function<void(const std::function<void(void*)>&)>& + aJITAddressProvider) { + if (aRangeStart == aRangeEnd) { + return; + } + + MOZ_RELEASE_ASSERT(aRangeStart < aRangeEnd); + + if (!mRanges.empty()) { + const JITFrameInfoForBufferRange& prevRange = mRanges.back(); + MOZ_RELEASE_ASSERT(prevRange.mRangeEnd <= aRangeStart, + "Ranges must be non-overlapping and added in-order."); + } + + using JITFrameKey = JITFrameInfoForBufferRange::JITFrameKey; + + JITFrameInfoForBufferRange::JITAddressToJITFramesMap jitAddressToJITFrameMap; + JITFrameInfoForBufferRange::JITFrameToFrameJSONMap jitFrameToFrameJSONMap; + + aJITAddressProvider([&](void* aJITAddress) { + // Make sure that we have cached data for aJITAddress. + auto addressEntry = jitAddressToJITFrameMap.lookupForAdd(aJITAddress); + if (!addressEntry) { + Vector<JITFrameKey> jitFrameKeys; + for (JS::ProfiledFrameHandle handle : + JS::GetProfiledFrames(aCx, aJITAddress)) { + uint32_t depth = jitFrameKeys.length(); + JITFrameKey jitFrameKey{handle.canonicalAddress(), depth}; + auto frameEntry = jitFrameToFrameJSONMap.lookupForAdd(jitFrameKey); + if (!frameEntry) { + MOZ_RELEASE_ASSERT(jitFrameToFrameJSONMap.add( + frameEntry, jitFrameKey, + JSONForJITFrame(aCx, handle, *mUniqueStrings))); + } + MOZ_RELEASE_ASSERT(jitFrameKeys.append(jitFrameKey)); + } + MOZ_RELEASE_ASSERT(jitAddressToJITFrameMap.add(addressEntry, aJITAddress, + std::move(jitFrameKeys))); + } + }); + + MOZ_RELEASE_ASSERT(mRanges.append(JITFrameInfoForBufferRange{ + aRangeStart, aRangeEnd, std::move(jitAddressToJITFrameMap), + std::move(jitFrameToFrameJSONMap)})); +} + +struct ProfileSample { + uint32_t mStack; + double mTime; + Maybe<double> mResponsiveness; + RunningTimes mRunningTimes; +}; + +// Write CPU measurements with "Delta" unit, which is some amount of work that +// happened since the previous sample. 
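+// For example (field name taken from the thread-profile documentation in
+// ProfileBufferEntry.h): a RunningTimes measurement that has data is written
+// as its own "...Delta" column in the sample row, such as "ThreadCPUDelta";
+// a measurement with no data simply writes nothing for its column.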
+static void WriteDelta(AutoArraySchemaWriter& aSchemaWriter, uint32_t aProperty,
+                       uint64_t aDelta) {
+  aSchemaWriter.IntElement(aProperty, int64_t(aDelta));
+}
+
+static void WriteSample(SpliceableJSONWriter& aWriter,
+                        const ProfileSample& aSample) {
+  enum Schema : uint32_t {
+    STACK = 0,
+    TIME = 1,
+    EVENT_DELAY = 2
+#define RUNNING_TIME_SCHEMA(index, name, unit, jsonProperty) , name
+        PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_SCHEMA)
+#undef RUNNING_TIME_SCHEMA
+  };
+
+  AutoArraySchemaWriter writer(aWriter);
+
+  writer.IntElement(STACK, aSample.mStack);
+
+  writer.DoubleElement(TIME, aSample.mTime);
+
+  if (aSample.mResponsiveness.isSome()) {
+    writer.DoubleElement(EVENT_DELAY, *aSample.mResponsiveness);
+  }
+
+#define RUNNING_TIME_STREAM(index, name, unit, jsonProperty) \
+  aSample.mRunningTimes.Get##name##unit().apply(             \
+      [&writer](const uint64_t& aValue) {                    \
+        Write##unit(writer, name, aValue);                   \
+      });
+
+  PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_STREAM)
+
+#undef RUNNING_TIME_STREAM
+}
+
+class EntryGetter {
+ public:
+  explicit EntryGetter(ProfileChunkedBuffer::Reader& aReader,
+                       uint64_t aInitialReadPos = 0)
+      : mBlockIt(
+            aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
+                aInitialReadPos))),
+        mBlockItEnd(aReader.end()) {
+    if (!ReadLegacyOrEnd()) {
+      // Find and read the next non-legacy entry.
+      Next();
+    }
+  }
+
+  bool Has() const { return mBlockIt != mBlockItEnd; }
+
+  const ProfileBufferEntry& Get() const {
+    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Get()`");
+    return mEntry;
+  }
+
+  void Next() {
+    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Next()`");
+    for (;;) {
+      ++mBlockIt;
+      if (ReadLegacyOrEnd()) {
+        // Either we're at the end, or we could read a legacy entry -> Done.
+        break;
+      }
+      // Otherwise loop around until we hit a legacy entry or the end.
+    }
+  }
+
+  ProfileChunkedBuffer::BlockIterator Iterator() const { return mBlockIt; }
+
+  ProfileBufferBlockIndex CurBlockIndex() const {
+    return mBlockIt.CurrentBlockIndex();
+  }
+
+  uint64_t CurPos() const {
+    return CurBlockIndex().ConvertToProfileBufferIndex();
+  }
+
+ private:
+  // Try to read the entry at the current `mBlockIt` position.
+  // * If we're at the end of the buffer, just return `true`.
+  // * If there is a "legacy" entry (containing a real `ProfileBufferEntry`),
+  //   read it into `mEntry`, and return `true` as well.
+  // * Otherwise the entry contains a "modern" type that cannot be read into
+  //   `mEntry`, return `false` (so `EntryGetter` can skip to another entry).
+  bool ReadLegacyOrEnd() {
+    if (!Has()) {
+      return true;
+    }
+    // Read the entry "kind", which is always at the start of all entries.
+    ProfileBufferEntryReader er = *mBlockIt;
+    auto type = static_cast<ProfileBufferEntry::Kind>(
+        er.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
+    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
+               static_cast<ProfileBufferEntry::KindUnderlyingType>(
+                   ProfileBufferEntry::Kind::MODERN_LIMIT));
+    if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
+      er.SetRemainingBytes(0);
+      return false;
+    }
+    // Here, we have a legacy item, we need to read it from the start.
+    // Because the above `ReadObject` moved the reader, we need to reset it to
+    // the start of the entry before reading the whole entry.
+ er = *mBlockIt; + er.ReadBytes(&mEntry, er.RemainingBytes()); + return true; + } + + ProfileBufferEntry mEntry; + ProfileChunkedBuffer::BlockIterator mBlockIt; + const ProfileChunkedBuffer::BlockIterator mBlockItEnd; +}; + +// The following grammar shows legal sequences of profile buffer entries. +// The sequences beginning with a ThreadId entry are known as "samples". +// +// ( +// ( /* Samples */ +// ThreadId +// TimeBeforeCompactStack +// UnresponsivenessDurationMs? +// CompactStack +// /* internally including: +// ( NativeLeafAddr +// | Label FrameFlags? DynamicStringFragment* +// LineNumber? CategoryPair? +// | JitReturnAddr +// )+ +// */ +// ) +// | Marker +// | ( /* Counters */ +// CounterId +// Time +// ( +// CounterKey +// Count +// Number? +// )* +// ) +// | CollectionStart +// | CollectionEnd +// | Pause +// | Resume +// | ( ProfilerOverheadTime /* Sampling start timestamp */ +// ProfilerOverheadDuration /* Lock acquisition */ +// ProfilerOverheadDuration /* Expired markers cleaning */ +// ProfilerOverheadDuration /* Counters */ +// ProfilerOverheadDuration /* Threads */ +// ) +// )* +// +// The most complicated part is the stack entry sequence that begins with +// Label. Here are some examples. +// +// - ProfilingStack frames without a dynamic string: +// +// Label("js::RunScript") +// CategoryPair(JS::ProfilingCategoryPair::JS) +// +// Label("XREMain::XRE_main") +// LineNumber(4660) +// CategoryPair(JS::ProfilingCategoryPair::OTHER) +// +// Label("ElementRestyler::ComputeStyleChangeFor") +// LineNumber(3003) +// CategoryPair(JS::ProfilingCategoryPair::CSS) +// +// - ProfilingStack frames with a dynamic string: +// +// Label("nsObserverService::NotifyObservers") +// FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME)) +// DynamicStringFragment("domwindo") +// DynamicStringFragment("wopened") +// LineNumber(291) +// CategoryPair(JS::ProfilingCategoryPair::OTHER) +// +// Label("") +// FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME)) +// DynamicStringFragment("closeWin") +// DynamicStringFragment("dow (chr") +// DynamicStringFragment("ome://gl") +// DynamicStringFragment("obal/con") +// DynamicStringFragment("tent/glo") +// DynamicStringFragment("balOverl") +// DynamicStringFragment("ay.js:5)") +// DynamicStringFragment("") # this string holds the closing '\0' +// LineNumber(25) +// CategoryPair(JS::ProfilingCategoryPair::JS) +// +// Label("") +// FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME)) +// DynamicStringFragment("bound (s") +// DynamicStringFragment("elf-host") +// DynamicStringFragment("ed:914)") +// LineNumber(945) +// CategoryPair(JS::ProfilingCategoryPair::JS) +// +// - A profiling stack frame with an overly long dynamic string: +// +// Label("") +// FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME)) +// DynamicStringFragment("(too lon") +// DynamicStringFragment("g)") +// LineNumber(100) +// CategoryPair(JS::ProfilingCategoryPair::NETWORK) +// +// - A wasm JIT frame: +// +// Label("") +// FrameFlags(uint64_t(0)) +// DynamicStringFragment("wasm-fun") +// DynamicStringFragment("ction[87") +// DynamicStringFragment("36] (blo") +// DynamicStringFragment("b:http:/") +// DynamicStringFragment("/webasse") +// DynamicStringFragment("mbly.org") +// DynamicStringFragment("/3dc5759") +// DynamicStringFragment("4-ce58-4") +// DynamicStringFragment("626-975b") +// DynamicStringFragment("-08ad116") +// DynamicStringFragment("30bc1:38") +// DynamicStringFragment("29856)") +// +// - A JS frame in a synchronous sample: +// +// 
Label("") +// FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME)) +// DynamicStringFragment("u (https") +// DynamicStringFragment("://perf-") +// DynamicStringFragment("html.io/") +// DynamicStringFragment("ac0da204") +// DynamicStringFragment("aaa44d75") +// DynamicStringFragment("a800.bun") +// DynamicStringFragment("dle.js:2") +// DynamicStringFragment("5)") + +// Because this is a format entirely internal to the Profiler, any parsing +// error indicates a bug in the ProfileBuffer writing or the parser itself, +// or possibly flaky hardware. +#define ERROR_AND_CONTINUE(msg) \ + { \ + fprintf(stderr, "ProfileBuffer parse error: %s", msg); \ + MOZ_ASSERT(false, msg); \ + continue; \ + } + +int ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter, + int aThreadId, double aSinceTime, + UniqueStacks& aUniqueStacks) const { + UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength); + + return mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "ProfileChunkedBuffer cannot be out-of-session when sampler is " + "running"); + + int processedThreadId = 0; + + EntryGetter e(*aReader); + + for (;;) { + // This block skips entries until we find the start of the next sample. + // This is useful in three situations. + // + // - The circular buffer overwrites old entries, so when we start parsing + // we might be in the middle of a sample, and we must skip forward to + // the start of the next sample. + // + // - We skip samples that don't have an appropriate ThreadId or Time. + // + // - We skip range Pause, Resume, CollectionStart, Marker, Counter + // and CollectionEnd entries between samples. + while (e.Has()) { + if (e.Get().IsThreadId()) { + break; + } + e.Next(); + } + + if (!e.Has()) { + break; + } + + // Due to the skip_to_next_sample block above, if we have an entry here it + // must be a ThreadId entry. + MOZ_ASSERT(e.Get().IsThreadId()); + + int threadId = e.Get().GetInt(); + e.Next(); + + // Ignore samples that are for the wrong thread. + if (threadId != aThreadId && aThreadId != 0) { + continue; + } + + MOZ_ASSERT(aThreadId != 0 || processedThreadId == 0, + "aThreadId==0 should only be used with 1-sample buffer"); + + ProfileSample sample; + + auto ReadStack = [&](EntryGetter& e, uint64_t entryPosition, + const Maybe<double>& unresponsiveDuration, + const RunningTimes& aRunningTimes) { + UniqueStacks::StackKey stack = + aUniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)")); + + int numFrames = 0; + while (e.Has()) { + if (e.Get().IsNativeLeafAddr()) { + numFrames++; + + void* pc = e.Get().GetPtr(); + e.Next(); + + nsAutoCString buf; + + if (!aUniqueStacks.mCodeAddressService || + !aUniqueStacks.mCodeAddressService->GetFunction(pc, buf) || + buf.IsEmpty()) { + buf.AppendASCII("0x"); + // `AppendInt` only knows `uint32_t` or `uint64_t`, but because + // these are just aliases for *two* of (`unsigned`, `unsigned + // long`, and `unsigned long long`), a call with `uintptr_t` could + // use the third type and therefore would be ambiguous. + // So we want to force using exactly `uint32_t` or `uint64_t`, + // whichever matches the size of `uintptr_t`. + // (The outer cast to `uint` should then be a no-op.) 
+ using uint = + std::conditional_t<sizeof(uintptr_t) <= sizeof(uint32_t), + uint32_t, uint64_t>; + buf.AppendInt(static_cast<uint>(reinterpret_cast<uintptr_t>(pc)), + 16); + } + + stack = aUniqueStacks.AppendFrame( + stack, UniqueStacks::FrameKey(buf.get())); + + } else if (e.Get().IsLabel()) { + numFrames++; + + const char* label = e.Get().GetString(); + e.Next(); + + using FrameFlags = js::ProfilingStackFrame::Flags; + uint32_t frameFlags = 0; + if (e.Has() && e.Get().IsFrameFlags()) { + frameFlags = uint32_t(e.Get().GetUint64()); + e.Next(); + } + + bool relevantForJS = + frameFlags & uint32_t(FrameFlags::RELEVANT_FOR_JS); + + bool isBaselineInterp = + frameFlags & uint32_t(FrameFlags::IS_BLINTERP_FRAME); + + // Copy potential dynamic string fragments into dynStrBuf, so that + // dynStrBuf will then contain the entire dynamic string. + size_t i = 0; + dynStrBuf[0] = '\0'; + while (e.Has()) { + if (e.Get().IsDynamicStringFragment()) { + char chars[ProfileBufferEntry::kNumChars]; + e.Get().CopyCharsInto(chars); + for (char c : chars) { + if (i < kMaxFrameKeyLength) { + dynStrBuf[i] = c; + i++; + } + } + e.Next(); + } else { + break; + } + } + dynStrBuf[kMaxFrameKeyLength - 1] = '\0'; + bool hasDynamicString = (i != 0); + + nsAutoCStringN<1024> frameLabel; + if (label[0] != '\0' && hasDynamicString) { + if (frameFlags & uint32_t(FrameFlags::STRING_TEMPLATE_METHOD)) { + frameLabel.AppendPrintf("%s.%s", label, dynStrBuf.get()); + } else if (frameFlags & + uint32_t(FrameFlags::STRING_TEMPLATE_GETTER)) { + frameLabel.AppendPrintf("get %s.%s", label, dynStrBuf.get()); + } else if (frameFlags & + uint32_t(FrameFlags::STRING_TEMPLATE_SETTER)) { + frameLabel.AppendPrintf("set %s.%s", label, dynStrBuf.get()); + } else { + frameLabel.AppendPrintf("%s %s", label, dynStrBuf.get()); + } + } else if (hasDynamicString) { + frameLabel.Append(dynStrBuf.get()); + } else { + frameLabel.Append(label); + } + + uint64_t innerWindowID = 0; + if (e.Has() && e.Get().IsInnerWindowID()) { + innerWindowID = uint64_t(e.Get().GetUint64()); + e.Next(); + } + + Maybe<unsigned> line; + if (e.Has() && e.Get().IsLineNumber()) { + line = Some(unsigned(e.Get().GetInt())); + e.Next(); + } + + Maybe<unsigned> column; + if (e.Has() && e.Get().IsColumnNumber()) { + column = Some(unsigned(e.Get().GetInt())); + e.Next(); + } + + Maybe<JS::ProfilingCategoryPair> categoryPair; + if (e.Has() && e.Get().IsCategoryPair()) { + categoryPair = + Some(JS::ProfilingCategoryPair(uint32_t(e.Get().GetInt()))); + e.Next(); + } + + stack = aUniqueStacks.AppendFrame( + stack, + UniqueStacks::FrameKey(std::move(frameLabel), relevantForJS, + isBaselineInterp, innerWindowID, line, + column, categoryPair)); + + } else if (e.Get().IsJitReturnAddr()) { + numFrames++; + + // A JIT frame may expand to multiple frames due to inlining. + void* pc = e.Get().GetPtr(); + const Maybe<Vector<UniqueStacks::FrameKey>>& frameKeys = + aUniqueStacks.LookupFramesForJITAddressFromBufferPos( + pc, entryPosition ? entryPosition : e.CurPos()); + MOZ_RELEASE_ASSERT( + frameKeys, + "Attempting to stream samples for a buffer range " + "for which we don't have JITFrameInfo?"); + for (const UniqueStacks::FrameKey& frameKey : *frameKeys) { + stack = aUniqueStacks.AppendFrame(stack, frameKey); + } + + e.Next(); + + } else { + break; + } + } + + if (numFrames == 0) { + // It is possible to have empty stacks if native stackwalking is + // disabled. Skip samples with empty stacks. (See Bug 1497985). 
+ // Thus, don't use ERROR_AND_CONTINUE, but just continue by returning + // from this lambda. + return; + } + + sample.mStack = aUniqueStacks.GetOrAddStackIndex(stack); + + if (unresponsiveDuration.isSome()) { + sample.mResponsiveness = unresponsiveDuration; + } + + sample.mRunningTimes = aRunningTimes; + + WriteSample(aWriter, sample); + + processedThreadId = threadId; + }; // End of `ReadStack(EntryGetter&)` lambda. + + if (e.Has() && e.Get().IsTime()) { + sample.mTime = e.Get().GetDouble(); + e.Next(); + + // Ignore samples that are too old. + if (sample.mTime < aSinceTime) { + continue; + } + + ReadStack(e, 0, Nothing{}, RunningTimes{}); + } else if (e.Has() && e.Get().IsTimeBeforeCompactStack()) { + sample.mTime = e.Get().GetDouble(); + + // Ignore samples that are too old. + if (sample.mTime < aSinceTime) { + e.Next(); + continue; + } + + RunningTimes runningTimes; + Maybe<double> unresponsiveDuration; + + ProfileChunkedBuffer::BlockIterator it = e.Iterator(); + for (;;) { + ++it; + if (it.IsAtEnd()) { + break; + } + ProfileBufferEntryReader er = *it; + ProfileBufferEntry::Kind kind = + er.ReadObject<ProfileBufferEntry::Kind>(); + + // There may be running times before the CompactStack. + if (kind == ProfileBufferEntry::Kind::RunningTimes) { + er.ReadIntoObject(runningTimes); + continue; + } + + // There may be an UnresponsiveDurationMs before the CompactStack. + if (kind == ProfileBufferEntry::Kind::UnresponsiveDurationMs) { + unresponsiveDuration = Some(er.ReadObject<double>()); + continue; + } + + if (kind == ProfileBufferEntry::Kind::CompactStack) { + ProfileChunkedBuffer tempBuffer( + ProfileChunkedBuffer::ThreadSafety::WithoutMutex, + mWorkerChunkManager); + er.ReadIntoObject(tempBuffer); + tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "Local ProfileChunkedBuffer cannot be out-of-session"); + EntryGetter stackEntryGetter(*aReader); + if (stackEntryGetter.Has()) { + ReadStack(stackEntryGetter, + it.CurrentBlockIndex().ConvertToProfileBufferIndex(), + unresponsiveDuration, runningTimes); + } + }); + mWorkerChunkManager.Reset(tempBuffer.GetAllChunks()); + break; + } + + MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT, + "There should be no legacy entries between " + "TimeBeforeCompactStack and CompactStack"); + er.SetRemainingBytes(0); + } + + e.Next(); + } else { + ERROR_AND_CONTINUE("expected a Time entry"); + } + } + + return processedThreadId; + }); +} + +void ProfileBuffer::AddJITInfoForRange(uint64_t aRangeStart, int aThreadId, + JSContext* aContext, + JITFrameInfo& aJITFrameInfo) const { + // We can only process JitReturnAddr entries if we have a JSContext. + MOZ_RELEASE_ASSERT(aContext); + + aRangeStart = std::max(aRangeStart, BufferRangeStart()); + aJITFrameInfo.AddInfoForRange( + aRangeStart, BufferRangeEnd(), aContext, + [&](const std::function<void(void*)>& aJITAddressConsumer) { + // Find all JitReturnAddr entries in the given range for the given + // thread, and call aJITAddressConsumer with those addresses. + + mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "ProfileChunkedBuffer cannot be out-of-session when " + "sampler is running"); + + EntryGetter e(*aReader, aRangeStart); + + while (true) { + // Advance to the next ThreadId entry. + while (e.Has() && !e.Get().IsThreadId()) { + e.Next(); + } + if (!e.Has()) { + break; + } + + MOZ_ASSERT(e.Get().IsThreadId()); + int threadId = e.Get().GetInt(); + e.Next(); + + // Ignore samples that are for a different thread. 
+ if (threadId != aThreadId) { + continue; + } + + if (e.Has() && e.Get().IsTime()) { + // Legacy stack. + e.Next(); + while (e.Has() && !e.Get().IsThreadId()) { + if (e.Get().IsJitReturnAddr()) { + aJITAddressConsumer(e.Get().GetPtr()); + } + e.Next(); + } + } else if (e.Has() && e.Get().IsTimeBeforeCompactStack()) { + // Compact stack. + ProfileChunkedBuffer::BlockIterator it = e.Iterator(); + for (;;) { + ++it; + if (it.IsAtEnd()) { + break; + } + ProfileBufferEntryReader er = *it; + ProfileBufferEntry::Kind kind = + er.ReadObject<ProfileBufferEntry::Kind>(); + if (kind == ProfileBufferEntry::Kind::CompactStack) { + ProfileChunkedBuffer tempBuffer( + ProfileChunkedBuffer::ThreadSafety::WithoutMutex, + mWorkerChunkManager); + er.ReadIntoObject(tempBuffer); + tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT( + aReader, + "Local ProfileChunkedBuffer cannot be out-of-session"); + EntryGetter stackEntryGetter(*aReader); + while (stackEntryGetter.Has()) { + if (stackEntryGetter.Get().IsJitReturnAddr()) { + aJITAddressConsumer(stackEntryGetter.Get().GetPtr()); + } + stackEntryGetter.Next(); + } + }); + mWorkerChunkManager.Reset(tempBuffer.GetAllChunks()); + break; + } + + MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT, + "There should be no legacy entries between " + "TimeBeforeCompactStack and CompactStack"); + er.SetRemainingBytes(0); + } + + e.Next(); + } else { + ERROR_AND_CONTINUE("expected a Time entry"); + } + } + }); + }); +} + +void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter, + int aThreadId, + const TimeStamp& aProcessStartTime, + double aSinceTime, + UniqueStacks& aUniqueStacks) const { + mEntries.ReadEach([&](ProfileBufferEntryReader& aER) { + auto type = static_cast<ProfileBufferEntry::Kind>( + aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>()); + MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) < + static_cast<ProfileBufferEntry::KindUnderlyingType>( + ProfileBufferEntry::Kind::MODERN_LIMIT)); + bool entryWasFullyRead = false; + + if (type == ProfileBufferEntry::Kind::Marker) { + entryWasFullyRead = + mozilla::base_profiler_markers_detail::DeserializeAfterKindAndStream( + aER, aWriter, aThreadId, + [&](ProfileChunkedBuffer& aChunkedBuffer) { + ProfilerBacktrace backtrace("", &aChunkedBuffer); + backtrace.StreamJSON(aWriter, aProcessStartTime, aUniqueStacks); + }); + } + + if (!entryWasFullyRead) { + // The entry was not a marker, or it was a marker for another thread. + // We probably didn't read the whole entry, so we need to skip to the end. + aER.SetRemainingBytes(0); + } + }); +} + +void ProfileBuffer::StreamProfilerOverheadToJSON( + SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime, + double aSinceTime) const { + mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "ProfileChunkedBuffer cannot be out-of-session when sampler is " + "running"); + + EntryGetter e(*aReader); + + enum Schema : uint32_t { + TIME = 0, + LOCKING = 1, + MARKER_CLEANING = 2, + COUNTERS = 3, + THREADS = 4 + }; + + aWriter.StartObjectProperty("profilerOverhead"); + aWriter.StartObjectProperty("samples"); + // Stream all sampling overhead data. We skip other entries, because we + // process them in StreamSamplesToJSON()/etc. 
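+    // The resulting object has the shape
+    //   { "schema": { "time": 0, ... }, "data": [ [time, locking, ...], ... ] }
+    // where a data row might look like [16117.0, 0.02, 0.0, 0.01, 0.03]
+    // (illustrative values only).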
+ { + JSONSchemaWriter schema(aWriter); + schema.WriteField("time"); + schema.WriteField("locking"); + schema.WriteField("expiredMarkerCleaning"); + schema.WriteField("counters"); + schema.WriteField("threads"); + } + + aWriter.StartArrayProperty("data"); + double firstTime = 0.0; + double lastTime = 0.0; + ProfilerStats intervals, overheads, lockings, cleanings, counters, threads; + while (e.Has()) { + // valid sequence: ProfilerOverheadTime, ProfilerOverheadDuration * 4 + if (e.Get().IsProfilerOverheadTime()) { + double time = e.Get().GetDouble(); + if (time >= aSinceTime) { + e.Next(); + if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) { + ERROR_AND_CONTINUE( + "expected a ProfilerOverheadDuration entry after " + "ProfilerOverheadTime"); + } + double locking = e.Get().GetDouble(); + e.Next(); + if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) { + ERROR_AND_CONTINUE( + "expected a ProfilerOverheadDuration entry after " + "ProfilerOverheadTime,ProfilerOverheadDuration"); + } + double cleaning = e.Get().GetDouble(); + e.Next(); + if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) { + ERROR_AND_CONTINUE( + "expected a ProfilerOverheadDuration entry after " + "ProfilerOverheadTime,ProfilerOverheadDuration*2"); + } + double counter = e.Get().GetDouble(); + e.Next(); + if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) { + ERROR_AND_CONTINUE( + "expected a ProfilerOverheadDuration entry after " + "ProfilerOverheadTime,ProfilerOverheadDuration*3"); + } + double thread = e.Get().GetDouble(); + + if (firstTime == 0.0) { + firstTime = time; + } else { + // Note that we'll have 1 fewer interval than other numbers (because + // we need both ends of an interval to know its duration). The final + // difference should be insignificant over the expected many + // thousands of iterations. + intervals.Count(time - lastTime); + } + lastTime = time; + overheads.Count(locking + cleaning + counter + thread); + lockings.Count(locking); + cleanings.Count(cleaning); + counters.Count(counter); + threads.Count(thread); + + AutoArraySchemaWriter writer(aWriter); + writer.DoubleElement(TIME, time); + writer.DoubleElement(LOCKING, locking); + writer.DoubleElement(MARKER_CLEANING, cleaning); + writer.DoubleElement(COUNTERS, counter); + writer.DoubleElement(THREADS, thread); + } + } + e.Next(); + } + aWriter.EndArray(); // data + aWriter.EndObject(); // samples + + // Only output statistics if there is at least one full interval (and + // therefore at least two samplings.) 
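+    // The statistics below are: the overall profiledDuration, samplingCount,
+    // overheadDurations and overheadPercentage, plus mean/min/max properties
+    // (e.g. "meanInterval", "minInterval", "maxInterval") generated by
+    // PROFILER_STATS for each measured quantity.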
+ if (intervals.n > 0) { + aWriter.StartObjectProperty("statistics"); + aWriter.DoubleProperty("profiledDuration", lastTime - firstTime); + aWriter.IntProperty("samplingCount", overheads.n); + aWriter.DoubleProperty("overheadDurations", overheads.sum); + aWriter.DoubleProperty("overheadPercentage", + overheads.sum / (lastTime - firstTime)); +#define PROFILER_STATS(name, var) \ + aWriter.DoubleProperty("mean" name, (var).sum / (var).n); \ + aWriter.DoubleProperty("min" name, (var).min); \ + aWriter.DoubleProperty("max" name, (var).max); + PROFILER_STATS("Interval", intervals); + PROFILER_STATS("Overhead", overheads); + PROFILER_STATS("Lockings", lockings); + PROFILER_STATS("Cleaning", cleanings); + PROFILER_STATS("Counter", counters); + PROFILER_STATS("Thread", threads); +#undef PROFILER_STATS + aWriter.EndObject(); // statistics + } + aWriter.EndObject(); // profilerOverhead + }); +} + +struct CounterKeyedSample { + double mTime; + uint64_t mNumber; + int64_t mCount; +}; + +using CounterKeyedSamples = Vector<CounterKeyedSample>; + +static LazyLogModule sFuzzyfoxLog("Fuzzyfox"); + +using CounterMap = HashMap<uint64_t, CounterKeyedSamples>; + +// HashMap lookup, if not found, a default value is inserted. +// Returns reference to (existing or new) value inside the HashMap. +template <typename HashM, typename Key> +static auto& LookupOrAdd(HashM& aMap, Key&& aKey) { + auto addPtr = aMap.lookupForAdd(aKey); + if (!addPtr) { + MOZ_RELEASE_ASSERT(aMap.add(addPtr, std::forward<Key>(aKey), + typename HashM::Entry::ValueType{})); + MOZ_ASSERT(!!addPtr); + } + return addPtr->value(); +} + +void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter, + const TimeStamp& aProcessStartTime, + double aSinceTime) const { + // Because this is a format entirely internal to the Profiler, any parsing + // error indicates a bug in the ProfileBuffer writing or the parser itself, + // or possibly flaky hardware. + + mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "ProfileChunkedBuffer cannot be out-of-session when sampler is " + "running"); + + EntryGetter e(*aReader); + + enum Schema : uint32_t { TIME = 0, NUMBER = 1, COUNT = 2 }; + + // Stream all counters. We skip other entries, because we process them in + // StreamSamplesToJSON()/etc. + // + // Valid sequence in the buffer: + // CounterID + // Time + // ( CounterKey Count Number? 
)* + // + // And the JSON (example): + // "counters": { + // "name": "malloc", + // "category": "Memory", + // "description": "Amount of allocated memory", + // "sample_groups": { + // "id": 0, + // "samples": { + // "schema": {"time": 0, "number": 1, "count": 2}, + // "data": [ + // [ + // 16117.033968000002, + // 2446216, + // 6801320 + // ], + // [ + // 16118.037638, + // 2446216, + // 6801320 + // ], + // ], + // } + // } + // }, + + // Build the map of counters and populate it + HashMap<void*, CounterMap> counters; + + while (e.Has()) { + // skip all non-Counters, including if we start in the middle of a counter + if (e.Get().IsCounterId()) { + void* id = e.Get().GetPtr(); + CounterMap& counter = LookupOrAdd(counters, id); + e.Next(); + if (!e.Has() || !e.Get().IsTime()) { + ERROR_AND_CONTINUE("expected a Time entry"); + } + double time = e.Get().GetDouble(); + if (time >= aSinceTime) { + e.Next(); + while (e.Has() && e.Get().IsCounterKey()) { + uint64_t key = e.Get().GetUint64(); + CounterKeyedSamples& data = LookupOrAdd(counter, key); + e.Next(); + if (!e.Has() || !e.Get().IsCount()) { + ERROR_AND_CONTINUE("expected a Count entry"); + } + int64_t count = e.Get().GetUint64(); + e.Next(); + uint64_t number; + if (!e.Has() || !e.Get().IsNumber()) { + number = 0; + } else { + number = e.Get().GetInt64(); + } + CounterKeyedSample sample = {time, number, count}; + MOZ_RELEASE_ASSERT(data.append(sample)); + } + } else { + // skip counter sample - only need to skip the initial counter + // id, then let the loop at the top skip the rest + } + } + e.Next(); + } + // we have a map of a map of counter entries; dump them to JSON + if (counters.count() == 0) { + return; + } + + aWriter.StartArrayProperty("counters"); + for (auto iter = counters.iter(); !iter.done(); iter.next()) { + CounterMap& counter = iter.get().value(); + const BaseProfilerCount* base_counter = + static_cast<const BaseProfilerCount*>(iter.get().key()); + + aWriter.Start(); + aWriter.StringProperty("name", MakeStringSpan(base_counter->mLabel)); + aWriter.StringProperty("category", + MakeStringSpan(base_counter->mCategory)); + aWriter.StringProperty("description", + MakeStringSpan(base_counter->mDescription)); + + aWriter.StartArrayProperty("sample_groups"); + for (auto counter_iter = counter.iter(); !counter_iter.done(); + counter_iter.next()) { + CounterKeyedSamples& samples = counter_iter.get().value(); + uint64_t key = counter_iter.get().key(); + + size_t size = samples.length(); + if (size == 0) { + continue; + } + + aWriter.StartObjectElement(); + { + aWriter.IntProperty("id", static_cast<int64_t>(key)); + aWriter.StartObjectProperty("samples"); + { + // XXX Can we assume a missing count means 0? 
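+          // Note that the "data" rows written by the loop below are
+          // delta-encoded: the first row carries absolute number/count
+          // values (deltas against 0), later rows carry the difference from
+          // the previous sample, and samples whose number and count are both
+          // unchanged are skipped. For example (hypothetical), samples
+          // (t=1, number=10, count=100) and (t=2, number=12, count=130)
+          // are written as [1, 10, 100] and [2, 2, 30].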
+ JSONSchemaWriter schema(aWriter); + schema.WriteField("time"); + schema.WriteField("number"); + schema.WriteField("count"); + } + + aWriter.StartArrayProperty("data"); + uint64_t previousNumber = 0; + int64_t previousCount = 0; + for (size_t i = 0; i < size; i++) { + // Encode as deltas, and only encode if different than the last + // sample + if (i == 0 || samples[i].mNumber != previousNumber || + samples[i].mCount != previousCount) { + if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) { + MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error, + ("Fuzzyfox Profiler Assertion: %f >= %f", + samples[i].mTime, samples[i - 1].mTime)); + } + MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime); + MOZ_ASSERT(samples[i].mNumber >= previousNumber); + MOZ_ASSERT(samples[i].mNumber - previousNumber <= + uint64_t(std::numeric_limits<int64_t>::max())); + + AutoArraySchemaWriter writer(aWriter); + writer.DoubleElement(TIME, samples[i].mTime); + writer.IntElement( + NUMBER, + static_cast<int64_t>(samples[i].mNumber - previousNumber)); + writer.IntElement(COUNT, samples[i].mCount - previousCount); + previousNumber = samples[i].mNumber; + previousCount = samples[i].mCount; + } + } + aWriter.EndArray(); // data + aWriter.EndObject(); // samples + } + aWriter.EndObject(); // sample_groups item + } + aWriter.EndArray(); // sample groups + aWriter.End(); // for each counter + } + aWriter.EndArray(); // counters + }); +} + +#undef ERROR_AND_CONTINUE + +static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason, + const Maybe<double>& aStartTime, + const Maybe<double>& aEndTime) { + aWriter.Start(); + if (aStartTime) { + aWriter.DoubleProperty("startTime", *aStartTime); + } else { + aWriter.NullProperty("startTime"); + } + if (aEndTime) { + aWriter.DoubleProperty("endTime", *aEndTime); + } else { + aWriter.NullProperty("endTime"); + } + aWriter.StringProperty("reason", MakeStringSpan(aReason)); + aWriter.End(); +} + +void ProfileBuffer::StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter, + double aSinceTime) const { + mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "ProfileChunkedBuffer cannot be out-of-session when sampler is " + "running"); + + EntryGetter e(*aReader); + + Maybe<double> currentPauseStartTime; + Maybe<double> currentCollectionStartTime; + + while (e.Has()) { + if (e.Get().IsPause()) { + currentPauseStartTime = Some(e.Get().GetDouble()); + } else if (e.Get().IsResume()) { + AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime, + Some(e.Get().GetDouble())); + currentPauseStartTime = Nothing(); + } else if (e.Get().IsCollectionStart()) { + currentCollectionStartTime = Some(e.Get().GetDouble()); + } else if (e.Get().IsCollectionEnd()) { + AddPausedRange(aWriter, "collecting", currentCollectionStartTime, + Some(e.Get().GetDouble())); + currentCollectionStartTime = Nothing(); + } + e.Next(); + } + + if (currentPauseStartTime) { + AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime, + Nothing()); + } + if (currentCollectionStartTime) { + AddPausedRange(aWriter, "collecting", currentCollectionStartTime, + Nothing()); + } + }); +} + +bool ProfileBuffer::DuplicateLastSample(int aThreadId, double aSampleTimeMs, + Maybe<uint64_t>& aLastSample, + const RunningTimes& aRunningTimes) { + if (!aLastSample) { + return false; + } + + ProfileChunkedBuffer tempBuffer( + ProfileChunkedBuffer::ThreadSafety::WithoutMutex, mWorkerChunkManager); + + auto retrieveWorkerChunk = MakeScopeExit( + [&]() { 
mWorkerChunkManager.Reset(tempBuffer.GetAllChunks()); }); + + const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) { + MOZ_ASSERT(aReader, + "ProfileChunkedBuffer cannot be out-of-session when sampler is " + "running"); + + EntryGetter e(*aReader, *aLastSample); + + if (e.CurPos() != *aLastSample) { + // The last sample is no longer within the buffer range, so we cannot + // use it. Reset the stored buffer position to Nothing(). + aLastSample.reset(); + return false; + } + + MOZ_RELEASE_ASSERT(e.Has() && e.Get().IsThreadId() && + e.Get().GetInt() == aThreadId); + + e.Next(); + + // Go through the whole entry and duplicate it, until we find the next + // one. + while (e.Has()) { + switch (e.Get().GetKind()) { + case ProfileBufferEntry::Kind::Pause: + case ProfileBufferEntry::Kind::Resume: + case ProfileBufferEntry::Kind::PauseSampling: + case ProfileBufferEntry::Kind::ResumeSampling: + case ProfileBufferEntry::Kind::CollectionStart: + case ProfileBufferEntry::Kind::CollectionEnd: + case ProfileBufferEntry::Kind::ThreadId: + // We're done. + return true; + case ProfileBufferEntry::Kind::Time: + // Copy with new time + AddEntry(tempBuffer, ProfileBufferEntry::Time(aSampleTimeMs)); + break; + case ProfileBufferEntry::Kind::TimeBeforeCompactStack: { + // Copy with new time, followed by a compact stack. + AddEntry(tempBuffer, + ProfileBufferEntry::TimeBeforeCompactStack(aSampleTimeMs)); + + // Add running times if they have data. + if (!aRunningTimes.IsEmpty()) { + tempBuffer.PutObjects(ProfileBufferEntry::Kind::RunningTimes, + aRunningTimes); + } + + // The `CompactStack` *must* be present afterwards, but may not + // immediately follow `TimeBeforeCompactStack` (e.g., some markers + // could be written in-between), so we need to look for it in the + // following entries. + ProfileChunkedBuffer::BlockIterator it = e.Iterator(); + for (;;) { + ++it; + if (it.IsAtEnd()) { + break; + } + ProfileBufferEntryReader er = *it; + auto kind = static_cast<ProfileBufferEntry::Kind>( + er.ReadObject<ProfileBufferEntry::KindUnderlyingType>()); + MOZ_ASSERT( + static_cast<ProfileBufferEntry::KindUnderlyingType>(kind) < + static_cast<ProfileBufferEntry::KindUnderlyingType>( + ProfileBufferEntry::Kind::MODERN_LIMIT)); + if (kind == ProfileBufferEntry::Kind::CompactStack) { + // Found our CompactStack, just make a copy of the whole entry. + er = *it; + auto bytes = er.RemainingBytes(); + MOZ_ASSERT(bytes < + ProfileBufferChunkManager::scExpectedMaximumStackSize); + tempBuffer.Put(bytes, [&](Maybe<ProfileBufferEntryWriter>& aEW) { + MOZ_ASSERT(aEW.isSome(), "tempBuffer cannot be out-of-session"); + aEW->WriteFromReader(er, bytes); + }); + // CompactStack marks the end, we're done. + break; + } + + MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT, + "There should be no legacy entries between " + "TimeBeforeCompactStack and CompactStack"); + er.SetRemainingBytes(0); + // Here, we have encountered a non-legacy entry that was not the + // CompactStack we're looking for; just continue the search... + } + // We're done. + return true; + } + case ProfileBufferEntry::Kind::CounterKey: + case ProfileBufferEntry::Kind::Number: + case ProfileBufferEntry::Kind::Count: + // Don't copy anything not part of a thread's stack sample + break; + case ProfileBufferEntry::Kind::CounterId: + // CounterId is normally followed by Time - if so, we'd like + // to skip it. 
If we duplicate Time, it won't hurt anything, just + // waste buffer space (and this can happen if the CounterId has + // fallen off the end of the buffer, but Time (and Number/Count) + // are still in the buffer). + e.Next(); + if (e.Has() && e.Get().GetKind() != ProfileBufferEntry::Kind::Time) { + // this would only happen if there was an invalid sequence + // in the buffer. Don't skip it. + continue; + } + // we've skipped Time + break; + case ProfileBufferEntry::Kind::ProfilerOverheadTime: + // ProfilerOverheadTime is normally followed by + // ProfilerOverheadDuration*4 - if so, we'd like to skip it. Don't + // duplicate, as we are in the middle of a sampling and will soon + // capture its own overhead. + e.Next(); + // A missing Time would only happen if there was an invalid + // sequence in the buffer. Don't skip unexpected entry. + if (e.Has() && + e.Get().GetKind() != + ProfileBufferEntry::Kind::ProfilerOverheadDuration) { + continue; + } + e.Next(); + if (e.Has() && + e.Get().GetKind() != + ProfileBufferEntry::Kind::ProfilerOverheadDuration) { + continue; + } + e.Next(); + if (e.Has() && + e.Get().GetKind() != + ProfileBufferEntry::Kind::ProfilerOverheadDuration) { + continue; + } + e.Next(); + if (e.Has() && + e.Get().GetKind() != + ProfileBufferEntry::Kind::ProfilerOverheadDuration) { + continue; + } + // we've skipped ProfilerOverheadTime and + // ProfilerOverheadDuration*4. + break; + default: { + // Copy anything else we don't know about. + AddEntry(tempBuffer, e.Get()); + break; + } + } + e.Next(); + } + return true; + }); + + if (!ok) { + return false; + } + + // If the buffer was big enough, there won't be any cleared blocks. + if (tempBuffer.GetState().mClearedBlockCount != 0) { + // No need to try to read stack again as it won't fit. Reset the stored + // buffer position to Nothing(). + aLastSample.reset(); + return false; + } + + aLastSample = Some(AddThreadIdEntry(aThreadId)); + + mEntries.AppendContents(tempBuffer); + + return true; +} + +void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) { + // This function does nothing! + // The duration limit will be removed from Firefox, see bug 1632365. + Unused << aTime; +} + +// END ProfileBuffer +//////////////////////////////////////////////////////////////////////// diff --git a/tools/profiler/core/ProfileBufferEntry.h b/tools/profiler/core/ProfileBufferEntry.h new file mode 100644 index 0000000000..3ce35e6b85 --- /dev/null +++ b/tools/profiler/core/ProfileBufferEntry.h @@ -0,0 +1,489 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef ProfileBufferEntry_h +#define ProfileBufferEntry_h + +#include <cstdint> +#include <cstdlib> +#include <functional> +#include <utility> +#include "gtest/MozGtestFriend.h" +#include "js/ProfilingCategory.h" +#include "mozilla/Attributes.h" +#include "mozilla/HashFunctions.h" +#include "mozilla/HashTable.h" +#include "mozilla/Maybe.h" +#include "mozilla/ProfileBufferEntryKinds.h" +#include "mozilla/ProfileJSONWriter.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/Variant.h" +#include "mozilla/Vector.h" +#include "nsString.h" + +class ProfilerCodeAddressService; +struct JSContext; + +class ProfileBufferEntry { + public: + using KindUnderlyingType = mozilla::ProfileBufferEntryKindUnderlyingType; + using Kind = mozilla::ProfileBufferEntryKind; + + ProfileBufferEntry(); + + static constexpr size_t kNumChars = mozilla::ProfileBufferEntryNumChars; + + private: + // aString must be a static string. + ProfileBufferEntry(Kind aKind, const char* aString); + ProfileBufferEntry(Kind aKind, char aChars[kNumChars]); + ProfileBufferEntry(Kind aKind, void* aPtr); + ProfileBufferEntry(Kind aKind, double aDouble); + ProfileBufferEntry(Kind aKind, int64_t aInt64); + ProfileBufferEntry(Kind aKind, uint64_t aUint64); + ProfileBufferEntry(Kind aKind, int aInt); + + public: +#define CTOR(KIND, TYPE, SIZE) \ + static ProfileBufferEntry KIND(TYPE aVal) { \ + return ProfileBufferEntry(Kind::KIND, aVal); \ + } + FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR) +#undef CTOR + + Kind GetKind() const { return mKind; } + +#define IS_KIND(KIND, TYPE, SIZE) \ + bool Is##KIND() const { return mKind == Kind::KIND; } + FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND) +#undef IS_KIND + + private: + FRIEND_TEST(ThreadProfile, InsertOneEntry); + FRIEND_TEST(ThreadProfile, InsertOneEntryWithTinyBuffer); + FRIEND_TEST(ThreadProfile, InsertEntriesNoWrap); + FRIEND_TEST(ThreadProfile, InsertEntriesWrap); + FRIEND_TEST(ThreadProfile, MemoryMeasure); + friend class ProfileBuffer; + + Kind mKind; + uint8_t mStorage[kNumChars]; + + const char* GetString() const; + void* GetPtr() const; + double GetDouble() const; + int GetInt() const; + int64_t GetInt64() const; + uint64_t GetUint64() const; + void CopyCharsInto(char (&aOutArray)[kNumChars]) const; +}; + +// Packed layout: 1 byte for the tag + 8 bytes for the value. +static_assert(sizeof(ProfileBufferEntry) == 9, "bad ProfileBufferEntry size"); + +// Contains all the information about JIT frames that is needed to stream stack +// frames for JitReturnAddr entries in the profiler buffer. +// Every return address (void*) is mapped to one or more JITFrameKeys, and +// every JITFrameKey is mapped to a JSON string for that frame. +// mRangeStart and mRangeEnd describe the range in the buffer for which this +// mapping is valid. Only JitReturnAddr entries within that buffer range can be +// processed using this JITFrameInfoForBufferRange object. +struct JITFrameInfoForBufferRange final { + JITFrameInfoForBufferRange Clone() const; + + uint64_t mRangeStart; + uint64_t mRangeEnd; // mRangeEnd marks the first invalid index. 
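+  // For example (hypothetical): because of JIT inlining, a single return
+  // address may map to several JITFrameKeys, one per (possibly inlined)
+  // frame, each with its own canonical address and depth, and each key maps
+  // to its own cached frame JSON string.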
+ + struct JITFrameKey { + bool operator==(const JITFrameKey& aOther) const { + return mCanonicalAddress == aOther.mCanonicalAddress && + mDepth == aOther.mDepth; + } + bool operator!=(const JITFrameKey& aOther) const { + return !(*this == aOther); + } + + void* mCanonicalAddress; + uint32_t mDepth; + }; + struct JITFrameKeyHasher { + using Lookup = JITFrameKey; + + static mozilla::HashNumber hash(const JITFrameKey& aLookup) { + mozilla::HashNumber hash = 0; + hash = mozilla::AddToHash(hash, aLookup.mCanonicalAddress); + hash = mozilla::AddToHash(hash, aLookup.mDepth); + return hash; + } + + static bool match(const JITFrameKey& aKey, const JITFrameKey& aLookup) { + return aKey == aLookup; + } + + static void rekey(JITFrameKey& aKey, const JITFrameKey& aNewKey) { + aKey = aNewKey; + } + }; + + using JITAddressToJITFramesMap = + mozilla::HashMap<void*, mozilla::Vector<JITFrameKey>>; + JITAddressToJITFramesMap mJITAddressToJITFramesMap; + using JITFrameToFrameJSONMap = + mozilla::HashMap<JITFrameKey, nsCString, JITFrameKeyHasher>; + JITFrameToFrameJSONMap mJITFrameToFrameJSONMap; +}; + +// Contains JITFrameInfoForBufferRange objects for multiple profiler buffer +// ranges. +struct JITFrameInfo final { + JITFrameInfo() : mUniqueStrings(mozilla::MakeUnique<UniqueJSONStrings>()) {} + + MOZ_IMPLICIT JITFrameInfo(const JITFrameInfo& aOther); + + // Creates a new JITFrameInfoForBufferRange object in mRanges by looking up + // information about the provided JIT return addresses using aCx. + // Addresses are provided like this: + // The caller of AddInfoForRange supplies a function in aJITAddressProvider. + // This function will be called once, synchronously, with an + // aJITAddressConsumer argument, which is a function that needs to be called + // for every address. That function can be called multiple times for the same + // address. + void AddInfoForRange( + uint64_t aRangeStart, uint64_t aRangeEnd, JSContext* aCx, + const std::function<void(const std::function<void(void*)>&)>& + aJITAddressProvider); + + // Returns whether the information stored in this object is still relevant + // for any entries in the buffer. + bool HasExpired(uint64_t aCurrentBufferRangeStart) const { + if (mRanges.empty()) { + // No information means no relevant information. Allow this object to be + // discarded. + return true; + } + return mRanges.back().mRangeEnd <= aCurrentBufferRangeStart; + } + + // The array of ranges of JIT frame information, sorted by buffer position. + // Ranges are non-overlapping. + // The JSON of the cached frames can contain string indexes, which refer + // to strings in mUniqueStrings. + mozilla::Vector<JITFrameInfoForBufferRange> mRanges; + + // The string table which contains strings used in the frame JSON that's + // cached in mRanges. 
+ mozilla::UniquePtr<UniqueJSONStrings> mUniqueStrings; +}; + +class UniqueStacks { + public: + struct FrameKey { + explicit FrameKey(const char* aLocation) + : mData(NormalFrameData{nsCString(aLocation), false, false, 0, + mozilla::Nothing(), mozilla::Nothing()}) {} + + FrameKey(nsCString&& aLocation, bool aRelevantForJS, bool aBaselineInterp, + uint64_t aInnerWindowID, const mozilla::Maybe<unsigned>& aLine, + const mozilla::Maybe<unsigned>& aColumn, + const mozilla::Maybe<JS::ProfilingCategoryPair>& aCategoryPair) + : mData(NormalFrameData{aLocation, aRelevantForJS, aBaselineInterp, + aInnerWindowID, aLine, aColumn, + aCategoryPair}) {} + + FrameKey(void* aJITAddress, uint32_t aJITDepth, uint32_t aRangeIndex) + : mData(JITFrameData{aJITAddress, aJITDepth, aRangeIndex}) {} + + FrameKey(const FrameKey& aToCopy) = default; + + uint32_t Hash() const; + bool operator==(const FrameKey& aOther) const { + return mData == aOther.mData; + } + + struct NormalFrameData { + bool operator==(const NormalFrameData& aOther) const; + + nsCString mLocation; + bool mRelevantForJS; + bool mBaselineInterp; + uint64_t mInnerWindowID; + mozilla::Maybe<unsigned> mLine; + mozilla::Maybe<unsigned> mColumn; + mozilla::Maybe<JS::ProfilingCategoryPair> mCategoryPair; + }; + struct JITFrameData { + bool operator==(const JITFrameData& aOther) const; + + void* mCanonicalAddress; + uint32_t mDepth; + uint32_t mRangeIndex; + }; + mozilla::Variant<NormalFrameData, JITFrameData> mData; + }; + + struct FrameKeyHasher { + using Lookup = FrameKey; + + static mozilla::HashNumber hash(const FrameKey& aLookup) { + mozilla::HashNumber hash = 0; + if (aLookup.mData.is<FrameKey::NormalFrameData>()) { + const FrameKey::NormalFrameData& data = + aLookup.mData.as<FrameKey::NormalFrameData>(); + if (!data.mLocation.IsEmpty()) { + hash = mozilla::AddToHash(hash, + mozilla::HashString(data.mLocation.get())); + } + hash = mozilla::AddToHash(hash, data.mRelevantForJS); + hash = mozilla::AddToHash(hash, data.mBaselineInterp); + hash = mozilla::AddToHash(hash, data.mInnerWindowID); + if (data.mLine.isSome()) { + hash = mozilla::AddToHash(hash, *data.mLine); + } + if (data.mColumn.isSome()) { + hash = mozilla::AddToHash(hash, *data.mColumn); + } + if (data.mCategoryPair.isSome()) { + hash = mozilla::AddToHash(hash, + static_cast<uint32_t>(*data.mCategoryPair)); + } + } else { + const FrameKey::JITFrameData& data = + aLookup.mData.as<FrameKey::JITFrameData>(); + hash = mozilla::AddToHash(hash, data.mCanonicalAddress); + hash = mozilla::AddToHash(hash, data.mDepth); + hash = mozilla::AddToHash(hash, data.mRangeIndex); + } + return hash; + } + + static bool match(const FrameKey& aKey, const FrameKey& aLookup) { + return aKey == aLookup; + } + + static void rekey(FrameKey& aKey, const FrameKey& aNewKey) { + aKey = aNewKey; + } + }; + + struct StackKey { + mozilla::Maybe<uint32_t> mPrefixStackIndex; + uint32_t mFrameIndex; + + explicit StackKey(uint32_t aFrame) + : mFrameIndex(aFrame), mHash(mozilla::HashGeneric(aFrame)) {} + + StackKey(const StackKey& aPrefix, uint32_t aPrefixStackIndex, + uint32_t aFrame) + : mPrefixStackIndex(mozilla::Some(aPrefixStackIndex)), + mFrameIndex(aFrame), + mHash(mozilla::AddToHash(aPrefix.mHash, aFrame)) {} + + mozilla::HashNumber Hash() const { return mHash; } + + bool operator==(const StackKey& aOther) const { + return mPrefixStackIndex == aOther.mPrefixStackIndex && + mFrameIndex == aOther.mFrameIndex; + } + + private: + mozilla::HashNumber mHash; + }; + + struct StackKeyHasher { + using Lookup = StackKey; + + 
static mozilla::HashNumber hash(const StackKey& aLookup) { + return aLookup.Hash(); + } + + static bool match(const StackKey& aKey, const StackKey& aLookup) { + return aKey == aLookup; + } + + static void rekey(StackKey& aKey, const StackKey& aNewKey) { + aKey = aNewKey; + } + }; + + explicit UniqueStacks(JITFrameInfo&& aJITFrameInfo); + + // Return a StackKey for aFrame as the stack's root frame (no prefix). + [[nodiscard]] StackKey BeginStack(const FrameKey& aFrame); + + // Return a new StackKey that is obtained by appending aFrame to aStack. + [[nodiscard]] StackKey AppendFrame(const StackKey& aStack, + const FrameKey& aFrame); + + // Look up frame keys for the given JIT address, and ensure that our frame + // table has entries for the returned frame keys. The JSON for these frames + // is taken from mJITInfoRanges. + // aBufferPosition is needed in order to look up the correct JIT frame info + // object in mJITInfoRanges. + [[nodiscard]] mozilla::Maybe<mozilla::Vector<UniqueStacks::FrameKey>> + LookupFramesForJITAddressFromBufferPos(void* aJITAddress, + uint64_t aBufferPosition); + + [[nodiscard]] uint32_t GetOrAddFrameIndex(const FrameKey& aFrame); + [[nodiscard]] uint32_t GetOrAddStackIndex(const StackKey& aStack); + + void SpliceFrameTableElements(SpliceableJSONWriter& aWriter); + void SpliceStackTableElements(SpliceableJSONWriter& aWriter); + + private: + void StreamNonJITFrame(const FrameKey& aFrame); + void StreamStack(const StackKey& aStack); + + public: + mozilla::UniquePtr<UniqueJSONStrings> mUniqueStrings; + + ProfilerCodeAddressService* mCodeAddressService = nullptr; + + private: + SpliceableChunkedJSONWriter mFrameTableWriter; + mozilla::HashMap<FrameKey, uint32_t, FrameKeyHasher> mFrameToIndexMap; + + SpliceableChunkedJSONWriter mStackTableWriter; + mozilla::HashMap<StackKey, uint32_t, StackKeyHasher> mStackToIndexMap; + + mozilla::Vector<JITFrameInfoForBufferRange> mJITInfoRanges; +}; + +// +// Thread profile JSON Format +// -------------------------- +// +// The profile contains much duplicate information. The output JSON of the +// profile attempts to deduplicate strings, frames, and stack prefixes, to cut +// down on size and to increase JSON streaming speed. Deduplicated values are +// streamed as indices into their respective tables. +// +// Further, arrays of objects with the same set of properties (e.g., samples, +// frames) are output as arrays according to a schema instead of an object +// with property names. A property that is not present is represented in the +// array as null or undefined. 
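+//
+// (In the concrete examples below, a property missing in the middle of a row
+// is written as null, while missing trailing properties are simply omitted;
+// see the frameTable "data" rows.)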
+// +// The format of the thread profile JSON is shown by the following example +// with 1 sample and 1 marker: +// +// { +// "name": "Foo", +// "tid": 42, +// "samples": +// { +// "schema": +// { +// "stack": 0, /* index into stackTable */ +// "time": 1, /* number */ +// "eventDelay": 2, /* number */ +// "ThreadCPUDelta": 3, /* optional number */ +// }, +// "data": +// [ +// [ 1, 0.0, 0.0 ] /* { stack: 1, time: 0.0, eventDelay: 0.0 } */ +// ] +// }, +// +// "markers": +// { +// "schema": +// { +// "name": 0, /* index into stringTable */ +// "time": 1, /* number */ +// "data": 2 /* arbitrary JSON */ +// }, +// "data": +// [ +// [ 3, 0.1 ] /* { name: 'example marker', time: 0.1 } */ +// ] +// }, +// +// "stackTable": +// { +// "schema": +// { +// "prefix": 0, /* index into stackTable */ +// "frame": 1 /* index into frameTable */ +// }, +// "data": +// [ +// [ null, 0 ], /* (root) */ +// [ 0, 1 ] /* (root) > foo.js */ +// ] +// }, +// +// "frameTable": +// { +// "schema": +// { +// "location": 0, /* index into stringTable */ +// "relevantForJS": 1, /* bool */ +// "innerWindowID": 2, /* inner window ID of global JS `window` object */ +// "implementation": 3, /* index into stringTable */ +// "optimizations": 4, /* arbitrary JSON */ +// "line": 5, /* number */ +// "column": 6, /* number */ +// "category": 7, /* index into profile.meta.categories */ +// "subcategory": 8 /* index into +// profile.meta.categories[category].subcategories */ +// }, +// "data": +// [ +// [ 0 ], /* { location: '(root)' } */ +// [ 1, null, null, 2 ] /* { location: 'foo.js', +// implementation: 'baseline' } */ +// ] +// }, +// +// "stringTable": +// [ +// "(root)", +// "foo.js", +// "baseline", +// "example marker" +// ] +// } +// +// Process: +// { +// "name": "Bar", +// "pid": 24, +// "threads": +// [ +// <0-N threads from above> +// ], +// "counters": /* includes the memory counter */ +// [ +// { +// "name": "qwerty", +// "category": "uiop", +// "description": "this is qwerty uiop", +// "sample_groups: +// [ +// { +// "id": 42, /* number (thread id, or object identifier (tab), etc) */ +// "samples: +// { +// "schema": +// { +// "time": 1, /* number */ +// "number": 2, /* number (of times the counter was touched) */ +// "count": 3 /* number (total for the counter) */ +// }, +// "data": +// [ +// [ 0.1, 1824, +// 454622 ] /* { time: 0.1, number: 1824, count: 454622 } */ +// ] +// }, +// }, +// /* more sample-group objects with different id's */ +// ] +// }, +// /* more counters */ +// ], +// } +// +#endif /* ndef ProfileBufferEntry_h */ diff --git a/tools/profiler/core/ProfiledThreadData.cpp b/tools/profiler/core/ProfiledThreadData.cpp new file mode 100644 index 0000000000..772a9fba1f --- /dev/null +++ b/tools/profiler/core/ProfiledThreadData.cpp @@ -0,0 +1,327 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "ProfiledThreadData.h" + +#include "platform.h" +#include "ProfileBuffer.h" + +#include "js/TraceLoggerAPI.h" +#include "mozilla/ProfileJSONWriter.h" +#include "mozilla/Span.h" +#include "nsXULAppAPI.h" + +#if defined(GP_OS_darwin) +# include <pthread.h> +#endif + +ProfiledThreadData::ProfiledThreadData(ThreadInfo* aThreadInfo, + nsIEventTarget* aEventTarget) + : mThreadInfo(aThreadInfo) { + MOZ_COUNT_CTOR(ProfiledThreadData); +} + +ProfiledThreadData::~ProfiledThreadData() { + MOZ_COUNT_DTOR(ProfiledThreadData); +} + +void ProfiledThreadData::StreamJSON( + const ProfileBuffer& aBuffer, JSContext* aCx, SpliceableJSONWriter& aWriter, + const nsACString& aProcessName, const nsACString& aETLDplus1, + const mozilla::TimeStamp& aProcessStartTime, double aSinceTime, + bool JSTracerEnabled, ProfilerCodeAddressService* aService) { + if (mJITFrameInfoForPreviousJSContexts && + mJITFrameInfoForPreviousJSContexts->HasExpired( + aBuffer.BufferRangeStart())) { + mJITFrameInfoForPreviousJSContexts = nullptr; + } + + // If we have an existing JITFrameInfo in mJITFrameInfoForPreviousJSContexts, + // copy the data from it. + JITFrameInfo jitFrameInfo = + mJITFrameInfoForPreviousJSContexts + ? JITFrameInfo(*mJITFrameInfoForPreviousJSContexts) + : JITFrameInfo(); + + if (aCx && mBufferPositionWhenReceivedJSContext) { + aBuffer.AddJITInfoForRange(*mBufferPositionWhenReceivedJSContext, + mThreadInfo->ThreadId(), aCx, jitFrameInfo); + } + + UniqueStacks uniqueStacks(std::move(jitFrameInfo)); + uniqueStacks.mCodeAddressService = aService; + + MOZ_ASSERT(uniqueStacks.mUniqueStrings); + aWriter.SetUniqueStrings(*uniqueStacks.mUniqueStrings); + + aWriter.Start(); + { + StreamSamplesAndMarkers(mThreadInfo->Name(), mThreadInfo->ThreadId(), + aBuffer, aWriter, aProcessName, aETLDplus1, + aProcessStartTime, mThreadInfo->RegisterTime(), + mUnregisterTime, aSinceTime, uniqueStacks); + + aWriter.StartObjectProperty("stackTable"); + { + { + JSONSchemaWriter schema(aWriter); + schema.WriteField("prefix"); + schema.WriteField("frame"); + } + + aWriter.StartArrayProperty("data"); + { uniqueStacks.SpliceStackTableElements(aWriter); } + aWriter.EndArray(); + } + aWriter.EndObject(); + + aWriter.StartObjectProperty("frameTable"); + { + { + JSONSchemaWriter schema(aWriter); + schema.WriteField("location"); + schema.WriteField("relevantForJS"); + schema.WriteField("innerWindowID"); + schema.WriteField("implementation"); + schema.WriteField("optimizations"); + schema.WriteField("line"); + schema.WriteField("column"); + schema.WriteField("category"); + schema.WriteField("subcategory"); + } + + aWriter.StartArrayProperty("data"); + { uniqueStacks.SpliceFrameTableElements(aWriter); } + aWriter.EndArray(); + } + aWriter.EndObject(); + + aWriter.StartArrayProperty("stringTable"); + { + std::move(*uniqueStacks.mUniqueStrings) + .SpliceStringTableElements(aWriter); + } + aWriter.EndArray(); + } + + if (aCx && JSTracerEnabled) { + StreamTraceLoggerJSON(aCx, aWriter, aProcessStartTime); + } + + aWriter.End(); + + aWriter.ResetUniqueStrings(); +} + +void ProfiledThreadData::StreamTraceLoggerJSON( + JSContext* aCx, SpliceableJSONWriter& aWriter, + const mozilla::TimeStamp& aProcessStartTime) { + aWriter.StartObjectProperty("jsTracerEvents"); + { + JS::AutoTraceLoggerLockGuard lockGuard; + JS::SpewTraceLoggerThread(aCx); + + uint32_t length = 0; + + // Collect Event Ids + aWriter.StartArrayProperty("events", mozilla::JSONWriter::SingleLineStyle); + { + JS::TraceLoggerIdBuffer collectionBuffer(lockGuard, aCx); + while 
(collectionBuffer.NextChunk()) { + for (uint32_t val : collectionBuffer) { + aWriter.IntElement(val); + length++; + } + } + } + aWriter.EndArray(); + + // Collect Event Timestamps + aWriter.StartArrayProperty("timestamps", + mozilla::JSONWriter::SingleLineStyle); + { + JS::TraceLoggerTimeStampBuffer collectionBuffer(lockGuard, aCx); + while (collectionBuffer.NextChunk()) { + for (mozilla::TimeStamp val : collectionBuffer) { + aWriter.DoubleElement((val - aProcessStartTime).ToMicroseconds()); + } + } + } + aWriter.EndArray(); + + // Collect Event Durations + aWriter.StartArrayProperty("durations", + mozilla::JSONWriter::SingleLineStyle); + { + JS::TraceLoggerDurationBuffer collectionBuffer(lockGuard, aCx); + while (collectionBuffer.NextChunk()) { + for (double val : collectionBuffer) { + if (val == -1) { + aWriter.NullElement(); + } else { + aWriter.DoubleElement(val); + } + } + } + } + aWriter.EndArray(); + + // Collect Event LineNo + aWriter.StartArrayProperty("line", mozilla::JSONWriter::SingleLineStyle); + { + JS::TraceLoggerLineNoBuffer collectionBuffer(lockGuard, aCx); + while (collectionBuffer.NextChunk()) { + for (int32_t val : collectionBuffer) { + if (val == -1) { + aWriter.NullElement(); + } else { + aWriter.IntElement(val); + } + } + } + } + aWriter.EndArray(); + + // Collect Event ColNo + aWriter.StartArrayProperty("column", mozilla::JSONWriter::SingleLineStyle); + { + JS::TraceLoggerColNoBuffer collectionBuffer(lockGuard, aCx); + while (collectionBuffer.NextChunk()) { + for (int32_t val : collectionBuffer) { + if (val == -1) { + aWriter.NullElement(); + } else { + aWriter.IntElement(val); + } + } + } + } + aWriter.EndArray(); + + aWriter.IntProperty("length", length); + } + aWriter.EndObject(); +} + +int StreamSamplesAndMarkers(const char* aName, int aThreadId, + const ProfileBuffer& aBuffer, + SpliceableJSONWriter& aWriter, + const nsACString& aProcessName, + const nsACString& aETLDplus1, + const mozilla::TimeStamp& aProcessStartTime, + const mozilla::TimeStamp& aRegisterTime, + const mozilla::TimeStamp& aUnregisterTime, + double aSinceTime, UniqueStacks& aUniqueStacks) { + int processedThreadId = 0; + + aWriter.StringProperty("processType", + mozilla::MakeStringSpan(XRE_GetProcessTypeString())); + + aWriter.StringProperty("name", mozilla::MakeStringSpan(aName)); + + // Use given process name (if any), unless we're the parent process. 
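+ // In the parent process the name below is always the fixed string
+ // "Parent Process"; other processes only emit "processName" when a
+ // non-empty name was provided by the caller.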
+ if (XRE_IsParentProcess()) { + aWriter.StringProperty("processName", "Parent Process"); + } else if (!aProcessName.IsEmpty()) { + aWriter.StringProperty("processName", aProcessName); + } + if (!aETLDplus1.IsEmpty()) { + aWriter.StringProperty("eTLD+1", aETLDplus1); + } + + if (aRegisterTime) { + aWriter.DoubleProperty( + "registerTime", (aRegisterTime - aProcessStartTime).ToMilliseconds()); + } else { + aWriter.NullProperty("registerTime"); + } + + if (aUnregisterTime) { + aWriter.DoubleProperty( + "unregisterTime", + (aUnregisterTime - aProcessStartTime).ToMilliseconds()); + } else { + aWriter.NullProperty("unregisterTime"); + } + + aWriter.StartObjectProperty("samples"); + { + { + JSONSchemaWriter schema(aWriter); + schema.WriteField("stack"); + schema.WriteField("time"); + schema.WriteField("eventDelay"); +#define RUNNING_TIME_FIELD(index, name, unit, jsonProperty) \ + schema.WriteField(#jsonProperty); + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_FIELD) +#undef RUNNING_TIME_FIELD + } + + aWriter.StartArrayProperty("data"); + { + processedThreadId = aBuffer.StreamSamplesToJSON( + aWriter, aThreadId, aSinceTime, aUniqueStacks); + } + aWriter.EndArray(); + } + aWriter.EndObject(); + + aWriter.StartObjectProperty("markers"); + { + { + JSONSchemaWriter schema(aWriter); + schema.WriteField("name"); + schema.WriteField("startTime"); + schema.WriteField("endTime"); + schema.WriteField("phase"); + schema.WriteField("category"); + schema.WriteField("data"); + } + + aWriter.StartArrayProperty("data"); + { + aBuffer.StreamMarkersToJSON(aWriter, aThreadId, aProcessStartTime, + aSinceTime, aUniqueStacks); + } + aWriter.EndArray(); + } + aWriter.EndObject(); + + aWriter.IntProperty("pid", + static_cast<int64_t>(profiler_current_process_id())); + aWriter.IntProperty( + "tid", + static_cast<int64_t>(aThreadId != 0 ? aThreadId : processedThreadId)); + + return processedThreadId; +} + +void ProfiledThreadData::NotifyAboutToLoseJSContext( + JSContext* aContext, const mozilla::TimeStamp& aProcessStartTime, + ProfileBuffer& aBuffer) { + if (!mBufferPositionWhenReceivedJSContext) { + return; + } + + MOZ_RELEASE_ASSERT(aContext); + + if (mJITFrameInfoForPreviousJSContexts && + mJITFrameInfoForPreviousJSContexts->HasExpired( + aBuffer.BufferRangeStart())) { + mJITFrameInfoForPreviousJSContexts = nullptr; + } + + mozilla::UniquePtr<JITFrameInfo> jitFrameInfo = + mJITFrameInfoForPreviousJSContexts + ? std::move(mJITFrameInfoForPreviousJSContexts) + : mozilla::MakeUnique<JITFrameInfo>(); + + aBuffer.AddJITInfoForRange(*mBufferPositionWhenReceivedJSContext, + mThreadInfo->ThreadId(), aContext, *jitFrameInfo); + + mJITFrameInfoForPreviousJSContexts = std::move(jitFrameInfo); + mBufferPositionWhenReceivedJSContext = mozilla::Nothing(); +} diff --git a/tools/profiler/core/ProfiledThreadData.h b/tools/profiler/core/ProfiledThreadData.h new file mode 100644 index 0000000000..2ef4a62e98 --- /dev/null +++ b/tools/profiler/core/ProfiledThreadData.h @@ -0,0 +1,140 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef ProfiledThreadData_h +#define ProfiledThreadData_h + +#include "ThreadInfo.h" + +#include "mozilla/Maybe.h" +#include "mozilla/TimeStamp.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/RefPtr.h" +#include "nsStringFwd.h" + +class ProfileBuffer; +class ProfilerCodeAddressService; +class UniqueStacks; +class nsIEventTarget; +struct JITFrameInfo; +struct JSContext; + +namespace mozilla::baseprofiler { +class SpliceableJSONWriter; +} + +// This class contains information about a thread that is only relevant while +// the profiler is running, for any threads (both alive and dead) whose thread +// name matches the "thread filter" in the current profiler run. +// ProfiledThreadData objects may be kept alive even after the thread is +// unregistered, as long as there is still data for that thread in the profiler +// buffer. +// +// Accesses to this class are protected by the profiler state lock. +// +// Created as soon as the following are true for the thread: +// - The profiler is running, and +// - the thread matches the profiler's thread filter, and +// - the thread is registered with the profiler. +// So it gets created in response to either (1) the profiler being started (for +// an existing registered thread) or (2) the thread being registered (if the +// profiler is already running). +// +// The thread may be unregistered during the lifetime of ProfiledThreadData. +// If that happens, NotifyUnregistered() is called. +// +// This class is the right place to store buffer positions. Profiler buffer +// positions become invalid if the profiler buffer is destroyed, which happens +// when the profiler is stopped. +class ProfiledThreadData final { + public: + ProfiledThreadData(ThreadInfo* aThreadInfo, nsIEventTarget* aEventTarget); + ~ProfiledThreadData(); + + void NotifyUnregistered(uint64_t aBufferPosition) { + mLastSample = mozilla::Nothing(); + MOZ_ASSERT(!mBufferPositionWhenReceivedJSContext, + "JSContext should have been cleared before the thread was " + "unregistered"); + mUnregisterTime = mozilla::TimeStamp::NowUnfuzzed(); + mBufferPositionWhenUnregistered = mozilla::Some(aBufferPosition); + } + mozilla::Maybe<uint64_t> BufferPositionWhenUnregistered() { + return mBufferPositionWhenUnregistered; + } + + mozilla::Maybe<uint64_t>& LastSample() { return mLastSample; } + + void StreamJSON(const ProfileBuffer& aBuffer, JSContext* aCx, + mozilla::baseprofiler::SpliceableJSONWriter& aWriter, + const nsACString& aProcessName, const nsACString& aETLDplus1, + const mozilla::TimeStamp& aProcessStartTime, + double aSinceTime, bool aJSTracerEnabled, + ProfilerCodeAddressService* aService); + + void StreamTraceLoggerJSON( + JSContext* aCx, mozilla::baseprofiler::SpliceableJSONWriter& aWriter, + const mozilla::TimeStamp& aProcessStartTime); + + const RefPtr<ThreadInfo> Info() const { return mThreadInfo; } + + void NotifyReceivedJSContext(uint64_t aCurrentBufferPosition) { + mBufferPositionWhenReceivedJSContext = + mozilla::Some(aCurrentBufferPosition); + } + + // Call this method when the JS entries inside the buffer are about to + // become invalid, i.e., just before JS shutdown. + void NotifyAboutToLoseJSContext(JSContext* aCx, + const mozilla::TimeStamp& aProcessStartTime, + ProfileBuffer& aBuffer); + + private: + // Group A: + // The following fields are interesting for the entire lifetime of a + // ProfiledThreadData object. + + // This thread's thread info. 
+ const RefPtr<ThreadInfo> mThreadInfo; + + // Contains JSON for JIT frames from any JSContexts that were used for this + // thread in the past. + // Null if this thread has never lost a JSContext or if all samples from + // previous JSContexts have been evicted from the profiler buffer. + mozilla::UniquePtr<JITFrameInfo> mJITFrameInfoForPreviousJSContexts; + + // Group B: + // The following fields are only used while this thread is alive and + // registered. They become Nothing() once the thread is unregistered. + + // When sampling, this holds the position in ActivePS::mBuffer of the most + // recent sample for this thread, or Nothing() if there is no sample for this + // thread in the buffer. + mozilla::Maybe<uint64_t> mLastSample; + + // Only non-Nothing() if the thread currently has a JSContext. + mozilla::Maybe<uint64_t> mBufferPositionWhenReceivedJSContext; + + // Group C: + // The following fields are only used once this thread has been unregistered. + + mozilla::Maybe<uint64_t> mBufferPositionWhenUnregistered; + mozilla::TimeStamp mUnregisterTime; +}; + +// Stream all samples and markers from aBuffer with the given aThreadId (or 0 +// for everything, which is assumed to be a single backtrace sample.) +// Returns the thread id of the output sample(s), or 0 if none was present. +int StreamSamplesAndMarkers( + const char* aName, int aThreadId, const ProfileBuffer& aBuffer, + mozilla::baseprofiler::SpliceableJSONWriter& aWriter, + const nsACString& aProcessName, const nsACString& aETLDplus1, + const mozilla::TimeStamp& aProcessStartTime, + const mozilla::TimeStamp& aRegisterTime, + const mozilla::TimeStamp& aUnregisterTime, double aSinceTime, + UniqueStacks& aUniqueStacks); + +#endif // ProfiledThreadData_h diff --git a/tools/profiler/core/ProfilerBacktrace.cpp b/tools/profiler/core/ProfilerBacktrace.cpp new file mode 100644 index 0000000000..02adb4e38f --- /dev/null +++ b/tools/profiler/core/ProfilerBacktrace.cpp @@ -0,0 +1,102 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "ProfilerBacktrace.h" + +#include "ProfileBuffer.h" +#include "ProfiledThreadData.h" +#include "ThreadInfo.h" + +#include "mozilla/ProfileJSONWriter.h" + +ProfilerBacktrace::ProfilerBacktrace( + const char* aName, + mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> + aProfileChunkedBufferStorage, + mozilla::UniquePtr<ProfileBuffer> + aProfileBufferStorageOrNull /* = nullptr */) + : mName(aName), + mOptionalProfileChunkedBufferStorage( + std::move(aProfileChunkedBufferStorage)), + mProfileChunkedBuffer(mOptionalProfileChunkedBufferStorage.get()), + mOptionalProfileBufferStorage(std::move(aProfileBufferStorageOrNull)), + mProfileBuffer(mOptionalProfileBufferStorage.get()) { + MOZ_COUNT_CTOR(ProfilerBacktrace); + if (mProfileBuffer) { + MOZ_RELEASE_ASSERT(mProfileChunkedBuffer, + "If we take ownership of a ProfileBuffer, we must also " + "receive ownership of a ProfileChunkedBuffer"); + MOZ_RELEASE_ASSERT( + mProfileChunkedBuffer == &mProfileBuffer->UnderlyingChunkedBuffer(), + "If we take ownership of a ProfileBuffer, we must also receive " + "ownership of its ProfileChunkedBuffer"); + } + MOZ_ASSERT( + !mProfileChunkedBuffer || !mProfileChunkedBuffer->IsThreadSafe(), + "ProfilerBacktrace only takes a non-thread-safe ProfileChunkedBuffer"); +} + +ProfilerBacktrace::ProfilerBacktrace( + const char* aName, + mozilla::ProfileChunkedBuffer* aExternalProfileChunkedBuffer, + ProfileBuffer* aExternalProfileBuffer) + : mName(aName), + mProfileChunkedBuffer(aExternalProfileChunkedBuffer), + mProfileBuffer(aExternalProfileBuffer) { + MOZ_COUNT_CTOR(ProfilerBacktrace); + if (!mProfileChunkedBuffer) { + if (mProfileBuffer) { + // We don't have a ProfileChunkedBuffer but we have a ProfileBuffer, use + // the latter's ProfileChunkedBuffer. + mProfileChunkedBuffer = &mProfileBuffer->UnderlyingChunkedBuffer(); + MOZ_ASSERT(!mProfileChunkedBuffer->IsThreadSafe(), + "ProfilerBacktrace only takes a non-thread-safe " + "ProfileChunkedBuffer"); + } + } else { + if (mProfileBuffer) { + MOZ_RELEASE_ASSERT( + mProfileChunkedBuffer == &mProfileBuffer->UnderlyingChunkedBuffer(), + "If we reference both ProfileChunkedBuffer and ProfileBuffer, they " + "must already be connected"); + } + MOZ_ASSERT(!mProfileChunkedBuffer->IsThreadSafe(), + "ProfilerBacktrace only takes a non-thread-safe " + "ProfileChunkedBuffer"); + } +} + +ProfilerBacktrace::~ProfilerBacktrace() { MOZ_COUNT_DTOR(ProfilerBacktrace); } + +int ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter, + const mozilla::TimeStamp& aProcessStartTime, + UniqueStacks& aUniqueStacks) { + int processedThreadId = 0; + + // Unlike ProfiledThreadData::StreamJSON, we don't need to call + // ProfileBuffer::AddJITInfoForRange because ProfileBuffer does not contain + // any JitReturnAddr entries. For synchronous samples, JIT frames get expanded + // at sample time. 
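+ // Whichever buffer is available is streamed through the same
+ // StreamSamplesAndMarkers() call, with a thread id of 0 (synchronous
+ // sample); when only the ProfileChunkedBuffer is present, a temporary
+ // ProfileBuffer is wrapped around it below.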
+ if (mProfileBuffer) { + processedThreadId = + StreamSamplesAndMarkers(mName.c_str(), 0, *mProfileBuffer, aWriter, + ""_ns, ""_ns, aProcessStartTime, + /* aRegisterTime */ mozilla::TimeStamp(), + /* aUnregisterTime */ mozilla::TimeStamp(), + /* aSinceTime */ 0, aUniqueStacks); + } else if (mProfileChunkedBuffer) { + ProfileBuffer profileBuffer(*mProfileChunkedBuffer); + processedThreadId = + StreamSamplesAndMarkers(mName.c_str(), 0, profileBuffer, aWriter, ""_ns, + ""_ns, aProcessStartTime, + /* aRegisterTime */ mozilla::TimeStamp(), + /* aUnregisterTime */ mozilla::TimeStamp(), + /* aSinceTime */ 0, aUniqueStacks); + } + // If there are no buffers, the backtrace is empty and nothing is streamed. + + return processedThreadId; +} diff --git a/tools/profiler/core/ProfilerBacktrace.h b/tools/profiler/core/ProfilerBacktrace.h new file mode 100644 index 0000000000..b6c099438b --- /dev/null +++ b/tools/profiler/core/ProfilerBacktrace.h @@ -0,0 +1,184 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef __PROFILER_BACKTRACE_H +#define __PROFILER_BACKTRACE_H + +#include "ProfileBuffer.h" + +#include "mozilla/ProfileBufferEntrySerialization.h" +#include "mozilla/UniquePtrExtensions.h" + +#include <string> + +class ProfileBuffer; +class ProfilerCodeAddressService; +class ThreadInfo; +class UniqueStacks; + +namespace mozilla { +class ProfileChunkedBuffer; +class TimeStamp; +namespace baseprofiler { +class SpliceableJSONWriter; +} // namespace baseprofiler +} // namespace mozilla + +// ProfilerBacktrace encapsulates a synchronous sample. +// It can work with a ProfileBuffer and/or a ProfileChunkedBuffer (if both, they +// must already be linked together). The ProfileChunkedBuffer contains all the +// data; the ProfileBuffer is not strictly needed, only provide it if it is +// already available at the call site. +// And these buffers can either be: +// - owned here, so that the ProfilerBacktrace object can be kept for later +// use), OR +// - referenced through pointers (in cases where the backtrace is immediately +// streamed out, so we only need temporary references to external buffers); +// these pointers may be null for empty backtraces. +class ProfilerBacktrace { + public: + // Take ownership of external buffers and use them to keep, and to stream a + // backtrace. If a ProfileBuffer is given, its underlying chunked buffer must + // be provided as well. + explicit ProfilerBacktrace( + const char* aName, + mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> + aProfileChunkedBufferStorage, + mozilla::UniquePtr<ProfileBuffer> aProfileBufferStorageOrNull = nullptr); + + // Take pointers to external buffers and use them to stream a backtrace. + // If null, the backtrace is effectively empty. + // If both are provided, they must already be connected. 
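+ // For example (hypothetical call site), a backtrace that is streamed
+ // right away can reference an existing non-thread-safe chunked buffer
+ // without taking ownership of it:
+ //   ProfilerBacktrace backtrace("sync", &chunkedBuffer);
+ //   backtrace.StreamJSON(writer, processStartTime, uniqueStacks);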
+ explicit ProfilerBacktrace( + const char* aName, + mozilla::ProfileChunkedBuffer* aExternalProfileChunkedBufferOrNull = + nullptr, + ProfileBuffer* aExternalProfileBufferOrNull = nullptr); + + ~ProfilerBacktrace(); + + [[nodiscard]] bool IsEmpty() const { + return !mProfileChunkedBuffer || + mozilla::ProfileBufferEntryWriter::Serializer< + mozilla::ProfileChunkedBuffer>::Bytes(*mProfileChunkedBuffer) <= + mozilla::ULEB128Size(0u); + } + + // ProfilerBacktraces' stacks are deduplicated in the context of the + // profile that contains the backtrace as a marker payload. + // + // That is, markers that contain backtraces should not need their own stack, + // frame, and string tables. They should instead reuse their parent + // profile's tables. + int StreamJSON(mozilla::baseprofiler::SpliceableJSONWriter& aWriter, + const mozilla::TimeStamp& aProcessStartTime, + UniqueStacks& aUniqueStacks); + + private: + // Used to serialize a ProfilerBacktrace. + friend struct mozilla::ProfileBufferEntryWriter::Serializer< + ProfilerBacktrace>; + friend struct mozilla::ProfileBufferEntryReader::Deserializer< + ProfilerBacktrace>; + + std::string mName; + + // `ProfileChunkedBuffer` in which `mProfileBuffer` stores its data; must be + // located before `mProfileBuffer` so that it's destroyed after. + mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> + mOptionalProfileChunkedBufferStorage; + // If null, there is no need to check mProfileBuffer's (if present) underlying + // buffer because this is done when constructed. + mozilla::ProfileChunkedBuffer* mProfileChunkedBuffer; + + mozilla::UniquePtr<ProfileBuffer> mOptionalProfileBufferStorage; + ProfileBuffer* mProfileBuffer; +}; + +namespace mozilla { + +// Format: [ UniquePtr<BlockRingsBuffer> | name ] +// Initial len==0 marks a nullptr or empty backtrace. +template <> +struct mozilla::ProfileBufferEntryWriter::Serializer<ProfilerBacktrace> { + static Length Bytes(const ProfilerBacktrace& aBacktrace) { + if (!aBacktrace.mProfileChunkedBuffer) { + // No buffer. + return ULEB128Size(0u); + } + auto bufferBytes = SumBytes(*aBacktrace.mProfileChunkedBuffer); + if (bufferBytes <= ULEB128Size(0u)) { + // Empty buffer. + return ULEB128Size(0u); + } + return bufferBytes + SumBytes(aBacktrace.mName); + } + + static void Write(mozilla::ProfileBufferEntryWriter& aEW, + const ProfilerBacktrace& aBacktrace) { + if (!aBacktrace.mProfileChunkedBuffer || + SumBytes(*aBacktrace.mProfileChunkedBuffer) <= ULEB128Size(0u)) { + // No buffer, or empty buffer. + aEW.WriteULEB128(0u); + return; + } + aEW.WriteObject(*aBacktrace.mProfileChunkedBuffer); + aEW.WriteObject(aBacktrace.mName); + } +}; + +template <typename Destructor> +struct mozilla::ProfileBufferEntryWriter::Serializer< + mozilla::UniquePtr<ProfilerBacktrace, Destructor>> { + static Length Bytes( + const mozilla::UniquePtr<ProfilerBacktrace, Destructor>& aBacktrace) { + if (!aBacktrace) { + // Null backtrace pointer (treated like an empty backtrace). + return ULEB128Size(0u); + } + return SumBytes(*aBacktrace); + } + + static void Write( + mozilla::ProfileBufferEntryWriter& aEW, + const mozilla::UniquePtr<ProfilerBacktrace, Destructor>& aBacktrace) { + if (!aBacktrace) { + // Null backtrace pointer (treated like an empty backtrace). 
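+ // A zero length is also what an empty backtrace serializes to, so
+ // deserialization turns both cases back into a null pointer.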
+ aEW.WriteULEB128(0u); + return; + } + aEW.WriteObject(*aBacktrace); + } +}; + +template <typename Destructor> +struct mozilla::ProfileBufferEntryReader::Deserializer< + mozilla::UniquePtr<ProfilerBacktrace, Destructor>> { + static void ReadInto( + mozilla::ProfileBufferEntryReader& aER, + mozilla::UniquePtr<ProfilerBacktrace, Destructor>& aBacktrace) { + aBacktrace = Read(aER); + } + + static mozilla::UniquePtr<ProfilerBacktrace, Destructor> Read( + mozilla::ProfileBufferEntryReader& aER) { + auto profileChunkedBuffer = + aER.ReadObject<UniquePtr<ProfileChunkedBuffer>>(); + if (!profileChunkedBuffer) { + return nullptr; + } + MOZ_ASSERT( + !profileChunkedBuffer->IsThreadSafe(), + "ProfilerBacktrace only stores non-thread-safe ProfileChunkedBuffers"); + std::string name = aER.ReadObject<std::string>(); + return UniquePtr<ProfilerBacktrace, Destructor>{ + new ProfilerBacktrace(name.c_str(), std::move(profileChunkedBuffer))}; + } +}; + +} // namespace mozilla + +#endif // __PROFILER_BACKTRACE_H diff --git a/tools/profiler/core/ProfilerCodeAddressService.cpp b/tools/profiler/core/ProfilerCodeAddressService.cpp new file mode 100644 index 0000000000..5a65e06379 --- /dev/null +++ b/tools/profiler/core/ProfilerCodeAddressService.cpp @@ -0,0 +1,75 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ProfilerCodeAddressService.h" + +#include "platform.h" +#include "mozilla/StackWalk.h" + +using namespace mozilla; + +#if defined(XP_LINUX) || defined(XP_FREEBSD) +static char* SearchSymbolTable(SymbolTable& aTable, uint32_t aOffset) { + size_t index; + bool exact = + BinarySearch(aTable.mAddrs, 0, aTable.mAddrs.Length(), aOffset, &index); + + if (index == 0 && !exact) { + // Our offset is before the first symbol in the table; no result. + return nullptr; + } + + // Extract the (mangled) symbol name out of the string table. + auto strings = reinterpret_cast<char*>(aTable.mBuffer.Elements()); + nsCString symbol; + symbol.Append(strings + aTable.mIndex[index - 1], + aTable.mIndex[index] - aTable.mIndex[index - 1]); + + // First try demangling as a Rust identifier. + char demangled[1024]; + if (!profiler_demangle_rust(symbol.get(), demangled, + ArrayLength(demangled))) { + // Then as a C++ identifier. + DemangleSymbol(symbol.get(), demangled, ArrayLength(demangled)); + } + demangled[ArrayLength(demangled) - 1] = '\0'; + + // Use the mangled name if we didn't successfully demangle. + return strdup(demangled[0] != '\0' ? demangled : symbol.get()); +} +#endif + +bool ProfilerCodeAddressService::GetFunction(const void* aPc, + nsACString& aResult) { + Entry& entry = GetEntry(aPc); + +#if defined(XP_LINUX) || defined(XP_FREEBSD) + // On Linux, most symbols will not be found by the MozDescribeCodeAddress call + // that GetEntry does. So we read the symbol table directly from the ELF + // image. + + // SymbolTable currently assumes library offsets will not be larger than + // 4 GiB. 
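+ // Hence the 0xFFFFFFFF guard below, before mLOffset is narrowed to
+ // uint32_t for the symbol-table search.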
+ if (entry.mLOffset <= 0xFFFFFFFF && !entry.mFunction) { + auto p = mSymbolTables.lookupForAdd(entry.mLibrary); + if (!p) { + if (!mSymbolTables.add(p, entry.mLibrary, SymbolTable())) { + MOZ_CRASH("ProfilerCodeAddressService OOM"); + } + profiler_get_symbol_table(entry.mLibrary, nullptr, &p->value()); + } + entry.mFunction = + SearchSymbolTable(p->value(), static_cast<uint32_t>(entry.mLOffset)); + } +#endif + + if (!entry.mFunction || entry.mFunction[0] == '\0') { + return false; + } + + aResult = nsDependentCString(entry.mFunction); + return true; +} diff --git a/tools/profiler/core/ProfilerMarkers.cpp b/tools/profiler/core/ProfilerMarkers.cpp new file mode 100644 index 0000000000..7c299678d1 --- /dev/null +++ b/tools/profiler/core/ProfilerMarkers.cpp @@ -0,0 +1,32 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/ProfilerMarkers.h" + +template mozilla::ProfileBufferBlockIndex AddMarkerToBuffer( + mozilla::ProfileChunkedBuffer&, const mozilla::ProfilerString8View&, + const mozilla::MarkerCategory&, mozilla::MarkerOptions&&, + mozilla::baseprofiler::markers::NoPayload); + +template mozilla::ProfileBufferBlockIndex AddMarkerToBuffer( + mozilla::ProfileChunkedBuffer&, const mozilla::ProfilerString8View&, + const mozilla::MarkerCategory&, mozilla::MarkerOptions&&, + mozilla::baseprofiler::markers::TextMarker, const std::string&); + +template mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View&, const mozilla::MarkerCategory&, + mozilla::MarkerOptions&&, mozilla::baseprofiler::markers::TextMarker, + const std::string&); + +template mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View&, const mozilla::MarkerCategory&, + mozilla::MarkerOptions&&, mozilla::baseprofiler::markers::TextMarker, + const nsCString&); + +template mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View&, const mozilla::MarkerCategory&, + mozilla::MarkerOptions&&, mozilla::baseprofiler::markers::Tracing, + const mozilla::ProfilerString8View&); diff --git a/tools/profiler/core/RegisteredThread.cpp b/tools/profiler/core/RegisteredThread.cpp new file mode 100644 index 0000000000..ffbeb782da --- /dev/null +++ b/tools/profiler/core/RegisteredThread.cpp @@ -0,0 +1,129 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "RegisteredThread.h" + +#include "js/AllocationRecording.h" +#include "js/ProfilingStack.h" +#include "js/TraceLoggerAPI.h" + +RacyRegisteredThread::RacyRegisteredThread(int aThreadId) + : mProfilingStackOwner( + mozilla::MakeNotNull<RefPtr<mozilla::ProfilingStackOwner>>()), + mThreadId(aThreadId), + mSleep(AWAKE), + mIsBeingProfiled(false) { + MOZ_COUNT_CTOR(RacyRegisteredThread); +} + +RegisteredThread::RegisteredThread(ThreadInfo* aInfo, nsIThread* aThread, + void* aStackTop) + : mRacyRegisteredThread(aInfo->ThreadId()), + mPlatformData(AllocPlatformData(aInfo->ThreadId())), + mStackTop(aStackTop), + mThreadInfo(aInfo), + mThread(aThread), + mContext(nullptr), + mJSSampling(INACTIVE), + mJSFlags(0) { + MOZ_COUNT_CTOR(RegisteredThread); + + // NOTE: aThread can be null for the first thread, before the ThreadManager + // is initialized. + + // We don't have to guess on mac +#if defined(GP_OS_darwin) + pthread_t self = pthread_self(); + mStackTop = pthread_get_stackaddr_np(self); +#endif +} + +RegisteredThread::~RegisteredThread() { MOZ_COUNT_DTOR(RegisteredThread); } + +size_t RegisteredThread::SizeOfIncludingThis( + mozilla::MallocSizeOf aMallocSizeOf) const { + size_t n = aMallocSizeOf(this); + + // Measurement of the following members may be added later if DMD finds it + // is worthwhile: + // - mPlatformData + // + // The following members are not measured: + // - mThreadInfo: because it is non-owning + + return n; +} + +void RegisteredThread::GetRunningEventDelay(const mozilla::TimeStamp& aNow, + mozilla::TimeDuration& aDelay, + mozilla::TimeDuration& aRunning) { + if (mThread) { // can be null right at the start of a process + mozilla::TimeStamp start; + mThread->GetRunningEventDelay(&aDelay, &start); + if (!start.IsNull()) { + // Note: the timestamp used here will be from when we started to + // suspend and sample the thread; which is also the timestamp + // associated with the sample. + aRunning = aNow - start; + return; + } + } + aDelay = mozilla::TimeDuration(); + aRunning = mozilla::TimeDuration(); +} + +void RegisteredThread::SetJSContext(JSContext* aContext) { + // This function runs on-thread. + + MOZ_ASSERT(aContext && !mContext); + + mContext = aContext; + + // We give the JS engine a non-owning reference to the ProfilingStack. It's + // important that the JS engine doesn't touch this once the thread dies. + js::SetContextProfilingStack(aContext, + &RacyRegisteredThread().ProfilingStack()); +} + +void RegisteredThread::PollJSSampling() { + // This function runs on-thread. + + // We can't start/stop profiling until we have the thread's JSContext. + if (mContext) { + // It is possible for mJSSampling to go through the following sequences. + // + // - INACTIVE, ACTIVE_REQUESTED, INACTIVE_REQUESTED, INACTIVE + // + // - ACTIVE, INACTIVE_REQUESTED, ACTIVE_REQUESTED, ACTIVE + // + // Therefore, the if and else branches here aren't always interleaved. + // This is ok because the JS engine can handle that. + // + if (mJSSampling == ACTIVE_REQUESTED) { + mJSSampling = ACTIVE; + js::EnableContextProfilingStack(mContext, true); + if (JSTracerEnabled()) { + JS::StartTraceLogger(mContext); + } + if (JSAllocationsEnabled()) { + // TODO - This probability should not be hardcoded. See Bug 1547284. 
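+ // (0.01 below means each JS allocation has roughly a 1% chance of
+ // being recorded.)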
+ JS::EnableRecordingAllocations(mContext, + profiler_add_js_allocation_marker, 0.01); + } + js::RegisterContextProfilingEventMarker(mContext, profiler_add_js_marker); + + } else if (mJSSampling == INACTIVE_REQUESTED) { + mJSSampling = INACTIVE; + js::EnableContextProfilingStack(mContext, false); + if (JSTracerEnabled()) { + JS::StopTraceLogger(mContext); + } + if (JSAllocationsEnabled()) { + JS::DisableRecordingAllocations(mContext); + } + } + } +} diff --git a/tools/profiler/core/RegisteredThread.h b/tools/profiler/core/RegisteredThread.h new file mode 100644 index 0000000000..bc569c5ae5 --- /dev/null +++ b/tools/profiler/core/RegisteredThread.h @@ -0,0 +1,283 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef RegisteredThread_h +#define RegisteredThread_h + +#include "platform.h" +#include "ThreadInfo.h" + +#include "mozilla/NotNull.h" +#include "mozilla/RefPtr.h" +#include "nsIEventTarget.h" +#include "nsIThread.h" + +namespace mozilla { +class ProfilingStackOwner; +} + +// This class contains the state for a single thread that is accessible without +// protection from gPSMutex in platform.cpp. Because there is no external +// protection against data races, it must provide internal protection. Hence +// the "Racy" prefix. +// +class RacyRegisteredThread final { + public: + explicit RacyRegisteredThread(int aThreadId); + + MOZ_COUNTED_DTOR(RacyRegisteredThread) + + void SetIsBeingProfiled(bool aIsBeingProfiled) { + mIsBeingProfiled = aIsBeingProfiled; + } + + bool IsBeingProfiled() const { return mIsBeingProfiled; } + + // This is called on every profiler restart. Put things that should happen at + // that time here. + void ReinitializeOnResume() { + // This is needed to cause an initial sample to be taken from sleeping + // threads that had been observed prior to the profiler stopping and + // restarting. Otherwise sleeping threads would not have any samples to + // copy forward while sleeping. + (void)mSleep.compareExchange(SLEEPING_OBSERVED, SLEEPING_NOT_OBSERVED); + } + + // This returns true for the second and subsequent calls in each sleep cycle. + bool CanDuplicateLastSampleDueToSleep() { + if (mSleep == AWAKE) { + return false; + } + + if (mSleep.compareExchange(SLEEPING_NOT_OBSERVED, SLEEPING_OBSERVED)) { + return false; + } + + return true; + } + + // Call this whenever the current thread sleeps. Calling it twice in a row + // without an intervening setAwake() call is an error. + void SetSleeping() { + MOZ_ASSERT(mSleep == AWAKE); + mSleep = SLEEPING_NOT_OBSERVED; + } + + // Call this whenever the current thread wakes. Calling it twice in a row + // without an intervening setSleeping() call is an error. 
+ void SetAwake() { + MOZ_ASSERT(mSleep != AWAKE); + mSleep = AWAKE; + } + + bool IsSleeping() { return mSleep != AWAKE; } + + int ThreadId() const { return mThreadId; } + + class ProfilingStack& ProfilingStack() { + return mProfilingStackOwner->ProfilingStack(); + } + const class ProfilingStack& ProfilingStack() const { + return mProfilingStackOwner->ProfilingStack(); + } + + mozilla::ProfilingStackOwner& ProfilingStackOwner() { + return *mProfilingStackOwner; + } + + private: + mozilla::NotNull<RefPtr<mozilla::ProfilingStackOwner>> mProfilingStackOwner; + + // mThreadId contains the thread ID of the current thread. It is safe to read + // this from multiple threads concurrently, as it will never be mutated. + const int mThreadId; + + // mSleep tracks whether the thread is sleeping, and if so, whether it has + // been previously observed. This is used for an optimization: in some cases, + // when a thread is asleep, we duplicate the previous sample, which is + // cheaper than taking a new sample. + // + // mSleep is atomic because it is accessed from multiple threads. + // + // - It is written only by this thread, via setSleeping() and setAwake(). + // + // - It is read by SamplerThread::Run(). + // + // There are two cases where racing between threads can cause an issue. + // + // - If CanDuplicateLastSampleDueToSleep() returns false but that result is + // invalidated before being acted upon, we will take a full sample + // unnecessarily. This is additional work but won't cause any correctness + // issues. (In actual fact, this case is impossible. In order to go from + // CanDuplicateLastSampleDueToSleep() returning false to it returning true + // requires an intermediate call to it in order for mSleep to go from + // SLEEPING_NOT_OBSERVED to SLEEPING_OBSERVED.) + // + // - If CanDuplicateLastSampleDueToSleep() returns true but that result is + // invalidated before being acted upon -- i.e. the thread wakes up before + // DuplicateLastSample() is called -- we will duplicate the previous + // sample. This is inaccurate, but only slightly... we will effectively + // treat the thread as having slept a tiny bit longer than it really did. + // + // This latter inaccuracy could be avoided by moving the + // CanDuplicateLastSampleDueToSleep() check within the thread-freezing code, + // e.g. the section where Tick() is called. But that would reduce the + // effectiveness of the optimization because more code would have to be run + // before we can tell that duplication is allowed. + // + static const int AWAKE = 0; + static const int SLEEPING_NOT_OBSERVED = 1; + static const int SLEEPING_OBSERVED = 2; + mozilla::Atomic<int> mSleep; + + // Is this thread being profiled? (e.g., should markers be recorded?) + mozilla::Atomic<bool, mozilla::MemoryOrdering::Relaxed> mIsBeingProfiled; +}; + +// This class contains information that's relevant to a single thread only +// while that thread is running and registered with the profiler, but +// regardless of whether the profiler is running. All accesses to it are +// protected by the profiler state lock. 
+class RegisteredThread final { + public: + RegisteredThread(ThreadInfo* aInfo, nsIThread* aThread, void* aStackTop); + ~RegisteredThread(); + + class RacyRegisteredThread& RacyRegisteredThread() { + return mRacyRegisteredThread; + } + const class RacyRegisteredThread& RacyRegisteredThread() const { + return mRacyRegisteredThread; + } + + PlatformData* GetPlatformData() const { return mPlatformData.get(); } + const void* StackTop() const { return mStackTop; } + + // aDelay is the time the event that is currently running on the thread + // was queued before starting to run (if a PrioritizedEventQueue + // (i.e. MainThread), this will be 0 for any event at a lower priority + // than Input). + // aRunning is the time the event has been running. If no event is + // running these will both be TimeDuration() (i.e. 0). Both are out + // params, and are always set. Their initial value is discarded. + void GetRunningEventDelay(const mozilla::TimeStamp& aNow, + mozilla::TimeDuration& aDelay, + mozilla::TimeDuration& aRunning); + + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + // Set the JSContext of the thread to be sampled. Sampling cannot begin until + // this has been set. + void SetJSContext(JSContext* aContext); + + void ClearJSContext() { + // This function runs on-thread. + mContext = nullptr; + } + + JSContext* GetJSContext() const { return mContext; } + + const RefPtr<ThreadInfo> Info() const { return mThreadInfo; } + const nsCOMPtr<nsIEventTarget> GetEventTarget() const { return mThread; } + void ResetMainThread(nsIThread* aThread) { mThread = aThread; } + + // Request that this thread start JS sampling. JS sampling won't actually + // start until a subsequent PollJSSampling() call occurs *and* mContext has + // been set. + void StartJSSampling(uint32_t aJSFlags) { + // This function runs on-thread or off-thread. + + MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE || + mJSSampling == INACTIVE_REQUESTED); + mJSSampling = ACTIVE_REQUESTED; + mJSFlags = aJSFlags; + } + + // Request that this thread stop JS sampling. JS sampling won't actually stop + // until a subsequent PollJSSampling() call occurs. + void StopJSSampling() { + // This function runs on-thread or off-thread. + + MOZ_RELEASE_ASSERT(mJSSampling == ACTIVE || + mJSSampling == ACTIVE_REQUESTED); + mJSSampling = INACTIVE_REQUESTED; + } + + // Poll to see if JS sampling should be started/stopped. + void PollJSSampling(); + + private: + class RacyRegisteredThread mRacyRegisteredThread; + + const UniquePlatformData mPlatformData; + const void* mStackTop; + + const RefPtr<ThreadInfo> mThreadInfo; + nsCOMPtr<nsIThread> mThread; + + // If this is a JS thread, this is its JSContext, which is required for any + // JS sampling. + JSContext* mContext; + + // The profiler needs to start and stop JS sampling of JS threads at various + // times. However, the JS engine can only do the required actions on the + // JS thread itself ("on-thread"), not from another thread ("off-thread"). + // Therefore, we have the following two-step process. + // + // - The profiler requests (on-thread or off-thread) that the JS sampling be + // started/stopped, by changing mJSSampling to the appropriate REQUESTED + // state. + // + // - The relevant JS thread polls (on-thread) for changes to mJSSampling. + // When it sees a REQUESTED state, it performs the appropriate actions to + // actually start/stop JS sampling, and changes mJSSampling out of the + // REQUESTED state. + // + // The state machine is as follows. 
+ // + // INACTIVE --> ACTIVE_REQUESTED + // ^ ^ | + // | _/ | + // | _/ | + // | / | + // | v v + // INACTIVE_REQUESTED <-- ACTIVE + // + // The polling is done in the following two ways. + // + // - Via the interrupt callback mechanism; the JS thread must call + // profiler_js_interrupt_callback() from its own interrupt callback. + // This is how sampling must be started/stopped for threads where the + // request was made off-thread. + // + // - When {Start,Stop}JSSampling() is called on-thread, we can immediately + // follow it with a PollJSSampling() call to avoid the delay between the + // two steps. Likewise, setJSContext() calls PollJSSampling(). + // + // One non-obvious thing about all this: these JS sampling requests are made + // on all threads, even non-JS threads. mContext needs to also be set (via + // setJSContext(), which can only happen for JS threads) for any JS sampling + // to actually happen. + // + enum { + INACTIVE = 0, + ACTIVE_REQUESTED = 1, + ACTIVE = 2, + INACTIVE_REQUESTED = 3, + } mJSSampling; + + uint32_t mJSFlags; + + bool JSTracerEnabled() { + return mJSFlags & uint32_t(JSInstrumentationFlags::TraceLogging); + } + + bool JSAllocationsEnabled() { + return mJSFlags & uint32_t(JSInstrumentationFlags::Allocations); + } +}; + +#endif // RegisteredThread_h diff --git a/tools/profiler/core/ThreadInfo.h b/tools/profiler/core/ThreadInfo.h new file mode 100644 index 0000000000..e698bad765 --- /dev/null +++ b/tools/profiler/core/ThreadInfo.h @@ -0,0 +1,48 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ThreadInfo_h +#define ThreadInfo_h + +#include "mozilla/TimeStamp.h" +#include "nsISupportsImpl.h" +#include "nsString.h" + +// This class contains information about a thread which needs to be stored +// across restarts of the profiler and which can be useful even after the +// thread has stopped running. +// It uses threadsafe refcounting and only contains immutable data. +class ThreadInfo final { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadInfo) + + ThreadInfo(const char* aName, int aThreadId, bool aIsMainThread, + const mozilla::TimeStamp& aRegisterTime = + mozilla::TimeStamp::NowUnfuzzed()) + : mName(aName), + mRegisterTime(aRegisterTime), + mThreadId(aThreadId), + mIsMainThread(aIsMainThread) { + // I don't know if we can assert this. But we should warn. 
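+ // (MOZ_ASSERT only fires in debug builds, so release builds are not
+ // affected by these checks.)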
+ MOZ_ASSERT(aThreadId >= 0, "native thread ID is < 0"); + MOZ_ASSERT(aThreadId <= INT32_MAX, "native thread ID is > INT32_MAX"); + } + + const char* Name() const { return mName.get(); } + mozilla::TimeStamp RegisterTime() const { return mRegisterTime; } + int ThreadId() const { return mThreadId; } + bool IsMainThread() const { return mIsMainThread; } + + private: + ~ThreadInfo() {} + + const nsCString mName; + const mozilla::TimeStamp mRegisterTime; + const int mThreadId; + const bool mIsMainThread; +}; + +#endif // ThreadInfo_h diff --git a/tools/profiler/core/VTuneProfiler.cpp b/tools/profiler/core/VTuneProfiler.cpp new file mode 100644 index 0000000000..58a39c51ee --- /dev/null +++ b/tools/profiler/core/VTuneProfiler.cpp @@ -0,0 +1,80 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifdef XP_WIN +# undef UNICODE +# undef _UNICODE +#endif + +#include "VTuneProfiler.h" +#include "mozilla/Bootstrap.h" +#include <memory> + +VTuneProfiler* VTuneProfiler::mInstance = nullptr; + +void VTuneProfiler::Initialize() { + // This is just a 'dirty trick' to find out if the ittnotify DLL was found. + // If it wasn't this function always returns 0, otherwise it returns + // incrementing numbers, if the library was found this wastes 2 events but + // that should be okay. + __itt_event testEvent = + __itt_event_create("Test event", strlen("Test event")); + testEvent = __itt_event_create("Test event 2", strlen("Test event 2")); + + if (testEvent) { + mInstance = new VTuneProfiler(); + } +} + +void VTuneProfiler::Shutdown() {} + +void VTuneProfiler::TraceInternal(const char* aName, TracingKind aKind) { + std::string str(aName); + + auto iter = mStrings.find(str); + + __itt_event event; + if (iter != mStrings.end()) { + event = iter->second; + } else { + event = __itt_event_create(aName, str.length()); + mStrings.insert({str, event}); + } + + if (aKind == TRACING_INTERVAL_START || aKind == TRACING_EVENT) { + // VTune will consider starts not matched with an end to be single point in + // time events. + __itt_event_start(event); + } else { + __itt_event_end(event); + } +} + +void VTuneProfiler::RegisterThreadInternal(const char* aName) { + std::string str(aName); + + if (!str.compare("GeckoMain")) { + // Process main thread. + switch (XRE_GetProcessType()) { + case GeckoProcessType::GeckoProcessType_Default: + __itt_thread_set_name("Main Process"); + break; + case GeckoProcessType::GeckoProcessType_Content: + __itt_thread_set_name("Content Process"); + break; + case GeckoProcessType::GeckoProcessType_GMPlugin: + __itt_thread_set_name("Plugin Process"); + break; + case GeckoProcessType::GeckoProcessType_GPU: + __itt_thread_set_name("GPU Process"); + break; + default: + __itt_thread_set_name("Unknown Process"); + } + return; + } + __itt_thread_set_name(aName); +} diff --git a/tools/profiler/core/VTuneProfiler.h b/tools/profiler/core/VTuneProfiler.h new file mode 100644 index 0000000000..d5304653e4 --- /dev/null +++ b/tools/profiler/core/VTuneProfiler.h @@ -0,0 +1,72 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef VTuneProfiler_h +#define VTuneProfiler_h + +// The intent here is to add 0 overhead for regular users. In order to build +// the VTune profiler code at all --enable-vtune-instrumentation needs to be +// set as a build option. Even then, when none of the environment variables +// is specified that allow us to find the ittnotify DLL, these functions +// should be minimal overhead. When starting Firefox under VTune, these +// env vars will be automatically defined, otherwise INTEL_LIBITTNOTIFY32/64 +// should be set to point at the ittnotify DLL. +#ifndef MOZ_VTUNE_INSTRUMENTATION + +# define VTUNE_INIT() +# define VTUNE_SHUTDOWN() + +# define VTUNE_TRACING(name, kind) +# define VTUNE_REGISTER_THREAD(name) + +#else + +# include "GeckoProfiler.h" + +// This is the regular Intel header, these functions are actually defined for +// us inside js/src/vtune by an intel C file which actually dynamically resolves +// them to the correct DLL. Through libxul these will 'magically' resolve. +# include "vtune/ittnotify.h" + +# include <stddef.h> +# include <unordered_map> +# include <string> + +class VTuneProfiler { + public: + static void Initialize(); + static void Shutdown(); + + static void Trace(const char* aName, TracingKind aKind) { + if (mInstance) { + mInstance->TraceInternal(aName, aKind); + } + } + static void RegisterThread(const char* aName) { + if (mInstance) { + mInstance->RegisterThreadInternal(aName); + } + } + + private: + void TraceInternal(const char* aName, TracingKind aKind); + void RegisterThreadInternal(const char* aName); + + // This is null when the ittnotify DLL could not be found. + static VTuneProfiler* mInstance; + + std::unordered_map<std::string, __itt_event> mStrings; +}; + +# define VTUNE_INIT() VTuneProfiler::Initialize() +# define VTUNE_SHUTDOWN() VTuneProfiler::Shutdown() + +# define VTUNE_TRACING(name, kind) VTuneProfiler::Trace(name, kind) +# define VTUNE_REGISTER_THREAD(name) VTuneProfiler::RegisterThread(name) + +#endif + +#endif /* VTuneProfiler_h */ diff --git a/tools/profiler/core/memory_hooks.cpp b/tools/profiler/core/memory_hooks.cpp new file mode 100644 index 0000000000..51f6425ba8 --- /dev/null +++ b/tools/profiler/core/memory_hooks.cpp @@ -0,0 +1,598 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "memory_hooks.h" + +#include "nscore.h" + +#include "mozilla/Assertions.h" +#include "mozilla/Atomics.h" +#include "mozilla/FastBernoulliTrial.h" +#include "mozilla/IntegerPrintfMacros.h" +#include "mozilla/JSONWriter.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/PlatformMutex.h" +#include "mozilla/ProfilerCounts.h" +#include "mozilla/ThreadLocal.h" + +#include "GeckoProfiler.h" +#include "prenv.h" +#include "replace_malloc.h" + +#include <ctype.h> +#include <errno.h> +#include <limits.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#ifdef XP_WIN +# include <windows.h> +# include <process.h> +#else +# include <pthread.h> +# include <sys/types.h> +# include <unistd.h> +#endif + +#ifdef ANDROID +# include <android/log.h> +#endif + +// The counters start out as a nullptr, and then get initialized only once. They +// are never destroyed, as it would cause race conditions for the memory hooks +// that use the counters. This helps guard against potentially expensive +// operations like using a mutex. +// +// In addition, this is a raw pointer and not a UniquePtr, as the counter +// machinery will try and de-register itself from the profiler. This could +// happen after the profiler and its PSMutex was already destroyed, resulting in +// a crash. +static ProfilerCounterTotal* sCounter; + +// The gBernoulli value starts out as a nullptr, and only gets initialized once. +// It then lives for the entire lifetime of the process. It cannot be deleted +// without additional multi-threaded protections, since if we deleted it during +// profiler_stop then there could be a race between threads already in a +// memory hook that might try to access the value after or during deletion. +static mozilla::FastBernoulliTrial* gBernoulli; + +namespace mozilla::profiler { + +//--------------------------------------------------------------------------- +// Utilities +//--------------------------------------------------------------------------- + +static malloc_table_t gMallocTable; + +// This is only needed because of the |const void*| vs |void*| arg mismatch. +static size_t MallocSizeOf(const void* aPtr) { + return gMallocTable.malloc_usable_size(const_cast<void*>(aPtr)); +} + +// The values for the Bernoulli trial are taken from DMD. According to DMD: +// +// In testing, a probability of 0.003 resulted in ~25% of heap blocks getting +// a stack trace and ~80% of heap bytes getting a stack trace. (This is +// possible because big heap blocks are more likely to get a stack trace.) +// +// The random number seeds are arbitrary and were obtained from random.org. +// +// However this value resulted in a lot of slowdown since the profiler stacks +// are pretty heavy to collect. The value was lowered to 10% of the original to +// 0.0003. +static void EnsureBernoulliIsInstalled() { + if (!gBernoulli) { + // This is only installed once. See the gBernoulli definition for more + // information. + gBernoulli = + new FastBernoulliTrial(0.0003, 0x8e26eeee166bc8ca, 0x56820f304a9c9ae0); + } +} + +// This class provides infallible allocations (they abort on OOM) like +// mozalloc's InfallibleAllocPolicy, except that memory hooks are bypassed. This +// policy is used by the HashSet. 
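+// Calling through gMallocTable (rather than plain malloc/free) is how the
+// hooks are bypassed, so the tracker's own bookkeeping allocations never
+// re-enter AllocCallback or FreeCallback.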
+class InfallibleAllocWithoutHooksPolicy { + static void ExitOnFailure(const void* aP) { + if (!aP) { + MOZ_CRASH("Profiler memory hooks out of memory; aborting"); + } + } + + public: + template <typename T> + static T* maybe_pod_malloc(size_t aNumElems) { + if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) { + return nullptr; + } + return (T*)gMallocTable.malloc(aNumElems * sizeof(T)); + } + + template <typename T> + static T* maybe_pod_calloc(size_t aNumElems) { + return (T*)gMallocTable.calloc(aNumElems, sizeof(T)); + } + + template <typename T> + static T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) { + if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) { + return nullptr; + } + return (T*)gMallocTable.realloc(aPtr, aNewSize * sizeof(T)); + } + + template <typename T> + static T* pod_malloc(size_t aNumElems) { + T* p = maybe_pod_malloc<T>(aNumElems); + ExitOnFailure(p); + return p; + } + + template <typename T> + static T* pod_calloc(size_t aNumElems) { + T* p = maybe_pod_calloc<T>(aNumElems); + ExitOnFailure(p); + return p; + } + + template <typename T> + static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) { + T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize); + ExitOnFailure(p); + return p; + } + + template <typename T> + static void free_(T* aPtr, size_t aSize = 0) { + gMallocTable.free(aPtr); + } + + static void reportAllocOverflow() { ExitOnFailure(nullptr); } + bool checkSimulatedOOM() const { return true; } +}; + +// We can't use mozilla::Mutex because it causes re-entry into the memory hooks. +// Define a custom implementation here. +class Mutex : private ::mozilla::detail::MutexImpl { + public: + Mutex() : ::mozilla::detail::MutexImpl() {} + + void Lock() { ::mozilla::detail::MutexImpl::lock(); } + void Unlock() { ::mozilla::detail::MutexImpl::unlock(); } +}; + +class MutexAutoLock { + MutexAutoLock(const MutexAutoLock&) = delete; + void operator=(const MutexAutoLock&) = delete; + + Mutex& mMutex; + + public: + explicit MutexAutoLock(Mutex& aMutex) : mMutex(aMutex) { mMutex.Lock(); } + ~MutexAutoLock() { mMutex.Unlock(); } +}; + +//--------------------------------------------------------------------------- +// Tracked allocations +//--------------------------------------------------------------------------- + +// The allocation tracker is shared between multiple threads, and is the +// coordinator for knowing when allocations have been tracked. The mutable +// internal state is protected by a mutex, and managed by the methods. +// +// The tracker knows about all the allocations that we have added to the +// profiler. This way, whenever any given piece of memory is freed, we can see +// if it was previously tracked, and we can track its deallocation. + +class AllocationTracker { + // This type tracks all of the allocations that we have captured. This way, we + // can see if a deallocation is inside of this set. We want to provide a + // balanced view into the allocations and deallocations. 
+ typedef mozilla::HashSet<const void*, mozilla::DefaultHasher<const void*>, + InfallibleAllocWithoutHooksPolicy> + AllocationSet; + + public: + AllocationTracker() : mAllocations(), mMutex() {} + + void AddMemoryAddress(const void* memoryAddress) { + MutexAutoLock lock(mMutex); + if (!mAllocations.put(memoryAddress)) { + MOZ_CRASH("Out of memory while tracking native allocations."); + }; + } + + void Reset() { + MutexAutoLock lock(mMutex); + mAllocations.clearAndCompact(); + } + + // Returns true when the memory address is found and removed, otherwise that + // memory address is not being tracked and it returns false. + bool RemoveMemoryAddressIfFound(const void* memoryAddress) { + MutexAutoLock lock(mMutex); + + auto ptr = mAllocations.lookup(memoryAddress); + if (ptr) { + // The memory was present. It no longer needs to be tracked. + mAllocations.remove(ptr); + return true; + } + + return false; + } + + private: + AllocationSet mAllocations; + Mutex mMutex; +}; + +static AllocationTracker* gAllocationTracker; + +static void EnsureAllocationTrackerIsInstalled() { + if (!gAllocationTracker) { + // This is only installed once. + gAllocationTracker = new AllocationTracker(); + } +} + +//--------------------------------------------------------------------------- +// Per-thread blocking of intercepts +//--------------------------------------------------------------------------- + +// On MacOS, and Linux the first __thread/thread_local access calls malloc, +// which leads to an infinite loop. So we use pthread-based TLS instead, which +// somehow doesn't have this problem. +#if !defined(XP_DARWIN) && !defined(XP_LINUX) +# define PROFILER_THREAD_LOCAL(T) MOZ_THREAD_LOCAL(T) +#else +# define PROFILER_THREAD_LOCAL(T) \ + ::mozilla::detail::ThreadLocal<T, ::mozilla::detail::ThreadLocalKeyStorage> +#endif + +class ThreadIntercept { + // When set to true, malloc does not intercept additional allocations. This is + // needed because collecting stacks creates new allocations. When blocked, + // these allocations are then ignored by the memory hook. + static PROFILER_THREAD_LOCAL(bool) tlsIsBlocked; + + // This is a quick flag to check and see if the allocations feature is enabled + // or disabled. + static mozilla::Atomic<bool, mozilla::Relaxed> sAllocationsFeatureEnabled; + + ThreadIntercept() = default; + + // Only allow consumers to access this information if they run + // ThreadIntercept::MaybeGet and ask through the non-static version. + static bool IsBlocked_() { + // When the native allocations feature is turned on, memory hooks run on + // every single allocation. For a subset of these allocations, the stack + // gets sampled by running profiler_get_backtrace(), which locks the + // profiler mutex. + // + // An issue arises when allocating while the profiler is locked FOR THE + // CURRENT THREAD. In this situation, if profiler_get_backtrace() were to be + // run, it would try to re-acquire this lock and deadlock. In order to guard + // against this deadlock, we check to see if the mutex is already locked by + // this thread. + return tlsIsBlocked.get() || profiler_is_locked_on_current_thread(); + } + + public: + static void Init() { tlsIsBlocked.infallibleInit(); } + + // The ThreadIntercept object can only be created through this method, the + // constructor is private. This is so that we only check to see if the native + // allocations are turned on once, and not multiple times through the calls. + // The feature maybe be toggled between different stages of the memory hook. 
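+ // (Callers should therefore keep the returned Maybe<> for the duration
+ // of one hook invocation rather than calling MaybeGet() repeatedly.)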
+ static Maybe<ThreadIntercept> MaybeGet() { + if (sAllocationsFeatureEnabled && !ThreadIntercept::IsBlocked_()) { + // Only return thread intercepts when the native allocations feature is + // enabled and we aren't blocked. + return Some(ThreadIntercept()); + } + return Nothing(); + } + + void Block() { + MOZ_ASSERT(!tlsIsBlocked.get()); + tlsIsBlocked.set(true); + } + + void Unblock() { + MOZ_ASSERT(tlsIsBlocked.get()); + tlsIsBlocked.set(false); + } + + bool IsBlocked() const { return ThreadIntercept::IsBlocked_(); } + + static void EnableAllocationFeature() { sAllocationsFeatureEnabled = true; } + + static void DisableAllocationFeature() { sAllocationsFeatureEnabled = false; } +}; + +PROFILER_THREAD_LOCAL(bool) ThreadIntercept::tlsIsBlocked; + +mozilla::Atomic<bool, mozilla::Relaxed> + ThreadIntercept::sAllocationsFeatureEnabled(false); + +// An object of this class must be created (on the stack) before running any +// code that might allocate. +class AutoBlockIntercepts { + ThreadIntercept& mThreadIntercept; + + public: + // Disallow copy and assign. + AutoBlockIntercepts(const AutoBlockIntercepts&) = delete; + void operator=(const AutoBlockIntercepts&) = delete; + + explicit AutoBlockIntercepts(ThreadIntercept& aThreadIntercept) + : mThreadIntercept(aThreadIntercept) { + mThreadIntercept.Block(); + } + ~AutoBlockIntercepts() { + MOZ_ASSERT(mThreadIntercept.IsBlocked()); + mThreadIntercept.Unblock(); + } +}; + +//--------------------------------------------------------------------------- +// malloc/free callbacks +//--------------------------------------------------------------------------- + +static void AllocCallback(void* aPtr, size_t aReqSize) { + if (!aPtr) { + return; + } + + // The first part of this function does not allocate. + size_t actualSize = gMallocTable.malloc_usable_size(aPtr); + if (actualSize > 0) { + sCounter->Add(actualSize); + } + + auto threadIntercept = ThreadIntercept::MaybeGet(); + if (threadIntercept.isNothing()) { + // Either the native allocations feature is not turned on, or we may be + // recursing into a memory hook, return. We'll still collect counter + // information about this allocation, but no stack. + return; + } + + // The next part of the function requires allocations, so block the memory + // hooks from recursing on any new allocations coming in. + AutoBlockIntercepts block(threadIntercept.ref()); + + // Perform a bernoulli trial, which will return true or false based on its + // configured probability. It takes into account the byte size so that + // larger allocations are weighted heavier than smaller allocations. + MOZ_ASSERT(gBernoulli, + "gBernoulli must be properly installed for the memory hooks."); + if ( + // First perform the Bernoulli trial. + gBernoulli->trial(actualSize) && + // Second, attempt to add a marker if the Bernoulli trial passed. + profiler_add_native_allocation_marker( + static_cast<int64_t>(actualSize), + reinterpret_cast<uintptr_t>(aPtr))) { + MOZ_ASSERT(gAllocationTracker, + "gAllocationTracker must be properly installed for the memory " + "hooks."); + // Only track the memory if the allocation marker was actually added to the + // profiler. + gAllocationTracker->AddMemoryAddress(aPtr); + } + + // We're ignoring aReqSize here +} + +static void FreeCallback(void* aPtr) { + if (!aPtr) { + return; + } + + // The first part of this function does not allocate. 
+  size_t unsignedSize = MallocSizeOf(aPtr);
+  int64_t signedSize = -(static_cast<int64_t>(unsignedSize));
+  sCounter->Add(signedSize);
+
+  auto threadIntercept = ThreadIntercept::MaybeGet();
+  if (threadIntercept.isNothing()) {
+    // Either the native allocations feature is not turned on, or we may be
+    // recursing into a memory hook; return. We'll still collect counter
+    // information about this deallocation, but no stack.
+    return;
+  }
+
+  // The next part of the function requires allocations, so block the memory
+  // hooks from recursing on any new allocations coming in.
+  AutoBlockIntercepts block(threadIntercept.ref());
+
+  // Unlike the allocation path, there is no Bernoulli trial here: a free is
+  // recorded if and only if the matching allocation was sampled and is still
+  // being tracked.
+  MOZ_ASSERT(
+      gAllocationTracker,
+      "gAllocationTracker must be properly installed for the memory hooks.");
+  if (gAllocationTracker->RemoveMemoryAddressIfFound(aPtr)) {
+    // This size here is negative, indicating a deallocation.
+    profiler_add_native_allocation_marker(signedSize,
+                                          reinterpret_cast<uintptr_t>(aPtr));
+  }
+}
+
+}  // namespace mozilla::profiler
+
+//---------------------------------------------------------------------------
+// malloc/free interception
+//---------------------------------------------------------------------------
+
+using namespace mozilla::profiler;
+
+static void* replace_malloc(size_t aSize) {
+  // This must be a call to malloc from outside. Intercept it.
+  void* ptr = gMallocTable.malloc(aSize);
+  AllocCallback(ptr, aSize);
+  return ptr;
+}
+
+static void* replace_calloc(size_t aCount, size_t aSize) {
+  void* ptr = gMallocTable.calloc(aCount, aSize);
+  AllocCallback(ptr, aCount * aSize);
+  return ptr;
+}
+
+static void* replace_realloc(void* aOldPtr, size_t aSize) {
+  // If |aOldPtr| is nullptr, the call is equivalent to |malloc(aSize)|.
+  if (!aOldPtr) {
+    return replace_malloc(aSize);
+  }
+
+  FreeCallback(aOldPtr);
+  void* ptr = gMallocTable.realloc(aOldPtr, aSize);
+  if (ptr) {
+    AllocCallback(ptr, aSize);
+  } else {
+    // If realloc fails, the old block is still live. Re-record the old pointer
+    // to roughly undo the FreeCallback above; the block will end up looking
+    // like it was allocated for the first time here, which is untrue, but this
+    // case is rare and doing better isn't worth the effort.
+    AllocCallback(aOldPtr, gMallocTable.malloc_usable_size(aOldPtr));
+  }
+  return ptr;
+}
+
+static void* replace_memalign(size_t aAlignment, size_t aSize) {
+  void* ptr = gMallocTable.memalign(aAlignment, aSize);
+  AllocCallback(ptr, aSize);
+  return ptr;
+}
+
+static void replace_free(void* aPtr) {
+  FreeCallback(aPtr);
+  gMallocTable.free(aPtr);
+}
+
+static void* replace_moz_arena_malloc(arena_id_t aArena, size_t aSize) {
+  void* ptr = gMallocTable.moz_arena_malloc(aArena, aSize);
+  AllocCallback(ptr, aSize);
+  return ptr;
+}
+
+static void* replace_moz_arena_calloc(arena_id_t aArena, size_t aCount,
+                                      size_t aSize) {
+  void* ptr = gMallocTable.moz_arena_calloc(aArena, aCount, aSize);
+  AllocCallback(ptr, aCount * aSize);
+  return ptr;
+}
+
+static void* replace_moz_arena_realloc(arena_id_t aArena, void* aPtr,
+                                       size_t aSize) {
+  void* ptr = gMallocTable.moz_arena_realloc(aArena, aPtr, aSize);
+  AllocCallback(ptr, aSize);
+  return ptr;
+}
+
+static void replace_moz_arena_free(arena_id_t aArena, void* aPtr) {
+  FreeCallback(aPtr);
+  gMallocTable.moz_arena_free(aArena, aPtr);
+}
+
+static void* replace_moz_arena_memalign(arena_id_t aArena, size_t aAlignment,
+                                        size_t aSize) {
+  void* ptr = gMallocTable.moz_arena_memalign(aArena, aAlignment, aSize);
+  AllocCallback(ptr, aSize);
+  return ptr;
+}
+
+// We have to replace these too, or jemalloc will assume we don't implement
+// any of the arena replacements.
+static arena_id_t replace_moz_create_arena_with_params(
+    arena_params_t* aParams) {
+  return gMallocTable.moz_create_arena_with_params(aParams);
+}
+
+static void replace_moz_dispose_arena(arena_id_t aArenaId) {
+  return gMallocTable.moz_dispose_arena(aArenaId);
+}
+
+// Must come after all the replace_* functions.
+void replace_init(malloc_table_t* aMallocTable, ReplaceMallocBridge** aBridge) {
+  gMallocTable = *aMallocTable;
+#define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC_BASE | MALLOC_FUNCS_ARENA)
+#define MALLOC_DECL(name, ...) aMallocTable->name = replace_##name;
+#include "malloc_decls.h"
+}
+
+void profiler_replace_remove() {}
+
+namespace mozilla::profiler {
+//---------------------------------------------------------------------------
+// Initialization
+//---------------------------------------------------------------------------
+
+void install_memory_hooks() {
+  if (!sCounter) {
+    sCounter = new ProfilerCounterTotal("malloc", "Memory",
+                                        "Amount of allocated memory");
+    // Also initialize the ThreadIntercept, even if native allocation tracking
+    // won't be turned on. This way the TLS will be initialized.
+    ThreadIntercept::Init();
+  }
+  jemalloc_replace_dynamic(replace_init);
+}
+
+// Remove the hooks, but leave the sCounter machinery. Deleting the counter
+// would race with any existing memory hooks that are currently running. Rather
+// than adding mutex overhead here, it's cheaper to just leak these values.
+void remove_memory_hooks() { jemalloc_replace_dynamic(nullptr); }
+
+void enable_native_allocations() {
+  // The bloat log tracks allocations and deallocations. This can conflict
+  // with the memory hook machinery, as the bloat log creates its own
+  // allocations. This means we can re-enter the bloat log machinery. At
+  // this time, the bloat log does not know about and cannot handle the
+  // native allocations feature.
+ // + // At the time of this writing, we hit this assertion: + // IsIdle(oldState) || IsRead(oldState) in Checker::StartReadOp() + // + // #01: GetBloatEntry(char const*, unsigned int) + // #02: NS_LogCtor + // #03: profiler_get_backtrace() + // #04: profiler_add_native_allocation_marker(long long) + // #05: mozilla::profiler::AllocCallback(void*, unsigned long) + // #06: replace_calloc(unsigned long, unsigned long) + // #07: PLDHashTable::ChangeTable(int) + // #08: PLDHashTable::Add(void const*, std::nothrow_t const&) + // #09: nsBaseHashtable<nsDepCharHashKey, nsAutoPtr<BloatEntry>, ... + // #10: GetBloatEntry(char const*, unsigned int) + // #11: NS_LogCtor + // #12: profiler_get_backtrace() + // ... + MOZ_ASSERT(!PR_GetEnv("XPCOM_MEM_BLOAT_LOG"), + "The bloat log feature is not compatible with the native " + "allocations instrumentation."); + + EnsureBernoulliIsInstalled(); + EnsureAllocationTrackerIsInstalled(); + ThreadIntercept::EnableAllocationFeature(); +} + +// This is safe to call even if native allocations hasn't been enabled. +void disable_native_allocations() { + ThreadIntercept::DisableAllocationFeature(); + if (gAllocationTracker) { + gAllocationTracker->Reset(); + } +} + +} // namespace mozilla::profiler diff --git a/tools/profiler/core/memory_hooks.h b/tools/profiler/core/memory_hooks.h new file mode 100644 index 0000000000..7a117ec15a --- /dev/null +++ b/tools/profiler/core/memory_hooks.h @@ -0,0 +1,23 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef memory_hooks_h +#define memory_hooks_h + +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) +namespace mozilla { +namespace profiler { + +void install_memory_hooks(); +void remove_memory_hooks(); +void enable_native_allocations(); +void disable_native_allocations(); + +} // namespace profiler +} // namespace mozilla +#endif + +#endif diff --git a/tools/profiler/core/platform-linux-android.cpp b/tools/profiler/core/platform-linux-android.cpp new file mode 100644 index 0000000000..28561932af --- /dev/null +++ b/tools/profiler/core/platform-linux-android.cpp @@ -0,0 +1,609 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google, Inc. nor the names of its contributors +// may be used to endorse or promote products derived from this +// software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. + +// This file is used for both Linux and Android as well as FreeBSD. + +#include <stdio.h> +#include <math.h> + +#include <pthread.h> +#if defined(GP_OS_freebsd) +# include <sys/thr.h> +#endif +#include <semaphore.h> +#include <signal.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <stdlib.h> +#include <sched.h> +#include <ucontext.h> +// Ubuntu Dapper requires memory pages to be marked as +// executable. Otherwise, OS raises an exception when executing code +// in that page. +#include <sys/types.h> // mmap & munmap +#include <sys/mman.h> // mmap & munmap +#include <sys/stat.h> // open +#include <fcntl.h> // open +#include <unistd.h> // sysconf +#include <semaphore.h> +#ifdef __GLIBC__ +# include <execinfo.h> // backtrace, backtrace_symbols +#endif // def __GLIBC__ +#include <strings.h> // index +#include <errno.h> +#include <stdarg.h> + +#include "prenv.h" +#include "mozilla/PodOperations.h" +#include "mozilla/DebugOnly.h" +#if defined(GP_OS_linux) || defined(GP_OS_android) +# include "common/linux/breakpad_getcontext.h" +#endif + +#include <string.h> +#include <list> + +using namespace mozilla; + +int profiler_current_process_id() { return getpid(); } + +int profiler_current_thread_id() { +#if defined(GP_OS_linux) || defined(GP_OS_android) + // glibc doesn't provide a wrapper for gettid(). + return static_cast<int>(static_cast<pid_t>(syscall(SYS_gettid))); +#elif defined(GP_OS_freebsd) + long id; + (void)thr_self(&id); + return static_cast<int>(id); +#else +# error "bad platform" +#endif +} + +void* GetStackTop(void* aGuess) { return aGuess; } + +static void PopulateRegsFromContext(Registers& aRegs, ucontext_t* aContext) { + aRegs.mContext = aContext; + mcontext_t& mcontext = aContext->uc_mcontext; + + // Extracting the sample from the context is extremely machine dependent. 
+#if defined(GP_PLAT_x86_linux) || defined(GP_PLAT_x86_android) + aRegs.mPC = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]); + aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]); + aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]); + aRegs.mLR = 0; +#elif defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_amd64_android) + aRegs.mPC = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]); + aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]); + aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]); + aRegs.mLR = 0; +#elif defined(GP_PLAT_amd64_freebsd) + aRegs.mPC = reinterpret_cast<Address>(mcontext.mc_rip); + aRegs.mSP = reinterpret_cast<Address>(mcontext.mc_rsp); + aRegs.mFP = reinterpret_cast<Address>(mcontext.mc_rbp); + aRegs.mLR = 0; +#elif defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android) + aRegs.mPC = reinterpret_cast<Address>(mcontext.arm_pc); + aRegs.mSP = reinterpret_cast<Address>(mcontext.arm_sp); + aRegs.mFP = reinterpret_cast<Address>(mcontext.arm_fp); + aRegs.mLR = reinterpret_cast<Address>(mcontext.arm_lr); +#elif defined(GP_PLAT_arm64_linux) || defined(GP_PLAT_arm64_android) + aRegs.mPC = reinterpret_cast<Address>(mcontext.pc); + aRegs.mSP = reinterpret_cast<Address>(mcontext.sp); + aRegs.mFP = reinterpret_cast<Address>(mcontext.regs[29]); + aRegs.mLR = reinterpret_cast<Address>(mcontext.regs[30]); +#elif defined(GP_PLAT_arm64_freebsd) + aRegs.mPC = reinterpret_cast<Address>(mcontext.mc_gpregs.gp_elr); + aRegs.mSP = reinterpret_cast<Address>(mcontext.mc_gpregs.gp_sp); + aRegs.mFP = reinterpret_cast<Address>(mcontext.mc_gpregs.gp_x[29]); + aRegs.mLR = reinterpret_cast<Address>(mcontext.mc_gpregs.gp_lr); +#elif defined(GP_PLAT_mips64_linux) || defined(GP_PLAT_mips64_android) + aRegs.mPC = reinterpret_cast<Address>(mcontext.pc); + aRegs.mSP = reinterpret_cast<Address>(mcontext.gregs[29]); + aRegs.mFP = reinterpret_cast<Address>(mcontext.gregs[30]); + +#else +# error "bad platform" +#endif +} + +#if defined(GP_OS_android) +# define SYS_tgkill __NR_tgkill +#endif + +#if defined(GP_OS_linux) || defined(GP_OS_android) +int tgkill(pid_t tgid, pid_t tid, int signalno) { + return syscall(SYS_tgkill, tgid, tid, signalno); +} +#endif + +#if defined(GP_OS_freebsd) +# define tgkill thr_kill2 +#endif + +class PlatformData { + public: + explicit PlatformData(int aThreadId) { + MOZ_ASSERT(aThreadId == profiler_current_thread_id()); + MOZ_COUNT_CTOR(PlatformData); + if (clockid_t clockid; + pthread_getcpuclockid(pthread_self(), &clockid) == 0) { + mClockId = Some(clockid); + } + } + + MOZ_COUNTED_DTOR(PlatformData) + + // Clock Id for this profiled thread. `Nothing` if `pthread_getcpuclockid` + // failed (e.g., if the system doesn't support per-thread clocks). + Maybe<clockid_t> GetClockId() const { return mClockId; } + + RunningTimes& PreviousThreadRunningTimesRef() { + return mPreviousThreadRunningTimes; + } + + private: + Maybe<clockid_t> mClockId; + RunningTimes mPreviousThreadRunningTimes; +}; + +//////////////////////////////////////////////////////////////////////// +// BEGIN Sampler target specifics + +// The only way to reliably interrupt a Linux thread and inspect its register +// and stack state is by sending a signal to it, and doing the work inside the +// signal handler. But we don't want to run much code inside the signal +// handler, since POSIX severely restricts what we can do in signal handlers. 
+// So we use a system of semaphores to suspend the thread and allow the +// sampler thread to do all the work of unwinding and copying out whatever +// data it wants. +// +// A four-message protocol is used to reliably suspend and later resume the +// thread to be sampled (the samplee): +// +// Sampler (signal sender) thread Samplee (thread to be sampled) +// +// Prepare the SigHandlerCoordinator +// and point sSigHandlerCoordinator at it +// +// send SIGPROF to samplee ------- MSG 1 ----> (enter signal handler) +// wait(mMessage2) Copy register state +// into sSigHandlerCoordinator +// <------ MSG 2 ----- post(mMessage2) +// Samplee is now suspended. wait(mMessage3) +// Examine its stack/register +// state at leisure +// +// Release samplee: +// post(mMessage3) ------- MSG 3 -----> +// wait(mMessage4) Samplee now resumes. Tell +// the sampler that we are done. +// <------ MSG 4 ------ post(mMessage4) +// Now we know the samplee's signal (leave signal handler) +// handler has finished using +// sSigHandlerCoordinator. We can +// safely reuse it for some other thread. +// + +// A type used to coordinate between the sampler (signal sending) thread and +// the thread currently being sampled (the samplee, which receives the +// signals). +// +// The first message is sent using a SIGPROF signal delivery. The subsequent +// three are sent using sem_wait/sem_post pairs. They are named accordingly +// in the following struct. +struct SigHandlerCoordinator { + SigHandlerCoordinator() { + PodZero(&mUContext); + int r = sem_init(&mMessage2, /* pshared */ 0, 0); + r |= sem_init(&mMessage3, /* pshared */ 0, 0); + r |= sem_init(&mMessage4, /* pshared */ 0, 0); + MOZ_ASSERT(r == 0); + } + + ~SigHandlerCoordinator() { + int r = sem_destroy(&mMessage2); + r |= sem_destroy(&mMessage3); + r |= sem_destroy(&mMessage4); + MOZ_ASSERT(r == 0); + } + + sem_t mMessage2; // To sampler: "context is in sSigHandlerCoordinator" + sem_t mMessage3; // To samplee: "resume" + sem_t mMessage4; // To sampler: "finished with sSigHandlerCoordinator" + ucontext_t mUContext; // Context at signal +}; + +struct SigHandlerCoordinator* Sampler::sSigHandlerCoordinator = nullptr; + +static void SigprofHandler(int aSignal, siginfo_t* aInfo, void* aContext) { + // Avoid TSan warning about clobbering errno. + int savedErrno = errno; + + MOZ_ASSERT(aSignal == SIGPROF); + MOZ_ASSERT(Sampler::sSigHandlerCoordinator); + + // By sending us this signal, the sampler thread has sent us message 1 in + // the comment above, with the meaning "|sSigHandlerCoordinator| is ready + // for use, please copy your register context into it." + Sampler::sSigHandlerCoordinator->mUContext = + *static_cast<ucontext_t*>(aContext); + + // Send message 2: tell the sampler thread that the context has been copied + // into |sSigHandlerCoordinator->mUContext|. sem_post can never fail by + // being interrupted by a signal, so there's no loop around this call. + int r = sem_post(&Sampler::sSigHandlerCoordinator->mMessage2); + MOZ_ASSERT(r == 0); + + // At this point, the sampler thread assumes we are suspended, so we must + // not touch any global state here. + + // Wait for message 3: the sampler thread tells us to resume. + while (true) { + r = sem_wait(&Sampler::sSigHandlerCoordinator->mMessage3); + if (r == -1 && errno == EINTR) { + // Interrupted by a signal. Try again. 
+ continue; + } + // We don't expect any other kind of failure + MOZ_ASSERT(r == 0); + break; + } + + // Send message 4: tell the sampler thread that we are finished accessing + // |sSigHandlerCoordinator|. After this point it is not safe to touch + // |sSigHandlerCoordinator|. + r = sem_post(&Sampler::sSigHandlerCoordinator->mMessage4); + MOZ_ASSERT(r == 0); + + errno = savedErrno; +} + +Sampler::Sampler(PSLockRef aLock) + : mMyPid(profiler_current_process_id()) + // We don't know what the sampler thread's ID will be until it runs, so + // set mSamplerTid to a dummy value and fill it in for real in + // SuspendAndSampleAndResumeThread(). + , + mSamplerTid(-1) { +#if defined(USE_EHABI_STACKWALK) + mozilla::EHABIStackWalkInit(); +#endif + + // NOTE: We don't initialize LUL here, instead initializing it in + // SamplerThread's constructor. This is because with the + // profiler_suspend_and_sample_thread entry point, we want to be able to + // sample without waiting for LUL to be initialized. + + // Request profiling signals. + struct sigaction sa; + sa.sa_sigaction = SigprofHandler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_RESTART | SA_SIGINFO; + if (sigaction(SIGPROF, &sa, &mOldSigprofHandler) != 0) { + MOZ_CRASH("Error installing SIGPROF handler in the profiler"); + } +} + +void Sampler::Disable(PSLockRef aLock) { + // Restore old signal handler. This is global state so it's important that + // we do it now, while gPSMutex is locked. + sigaction(SIGPROF, &mOldSigprofHandler, 0); +} + +static void StreamMetaPlatformSampleUnits(PSLockRef aLock, + SpliceableJSONWriter& aWriter) { + aWriter.StringProperty("threadCPUDelta", "ns"); +} + +static RunningTimes GetThreadRunningTimesDiff( + PSLockRef aLock, const RegisteredThread& aRegisteredThread) { + AUTO_PROFILER_STATS(GetRunningTimes_clock_gettime_thread); + + PlatformData* platformData = aRegisteredThread.GetPlatformData(); + MOZ_RELEASE_ASSERT(platformData); + Maybe<clockid_t> maybeCid = platformData->GetClockId(); + + if (MOZ_UNLIKELY(!maybeCid)) { + // No clock id -> Nothing to measure apart from the timestamp. + RunningTimes emptyRunningTimes; + emptyRunningTimes.SetPostMeasurementTimeStamp(TimeStamp::NowUnfuzzed()); + return emptyRunningTimes; + } + + const RunningTimes newRunningTimes = GetRunningTimesWithTightTimestamp( + [cid = *maybeCid](RunningTimes& aRunningTimes) { + AUTO_PROFILER_STATS(GetRunningTimes_clock_gettime); + if (timespec ts; clock_gettime(cid, &ts) == 0) { + aRunningTimes.ResetThreadCPUDelta( + uint64_t(ts.tv_sec) * 1'000'000'000u + uint64_t(ts.tv_nsec)); + } else { + aRunningTimes.ClearThreadCPUDelta(); + } + }); + + const RunningTimes diff = + newRunningTimes - platformData->PreviousThreadRunningTimesRef(); + platformData->PreviousThreadRunningTimesRef() = newRunningTimes; + return diff; +} + +template <typename Func> +void Sampler::SuspendAndSampleAndResumeThread( + PSLockRef aLock, const RegisteredThread& aRegisteredThread, + const TimeStamp& aNow, const Func& aProcessRegs) { + // Only one sampler thread can be sampling at once. So we expect to have + // complete control over |sSigHandlerCoordinator|. + MOZ_ASSERT(!sSigHandlerCoordinator); + + if (mSamplerTid == -1) { + mSamplerTid = profiler_current_thread_id(); + } + int sampleeTid = aRegisteredThread.Info()->ThreadId(); + MOZ_RELEASE_ASSERT(sampleeTid != mSamplerTid); + + //----------------------------------------------------------------// + // Suspend the samplee thread and get its context. 
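+
+  // (Editorial aside, hedged sketch; not part of the original patch.) The
+  // code that follows implements messages 1-4 of the protocol described
+  // above. For reference, messages 1 and 2 in isolation look roughly like
+  // this; all names here are hypothetical:
+#if 0
+  static sem_t gAck;  // "message 2": samplee -> sampler
+
+  static void MiniHandler(int, siginfo_t*, void* aCtx) {
+    // A real handler would copy registers out of the ucontext_t here.
+    (void)static_cast<ucontext_t*>(aCtx);
+    sem_post(&gAck);  // sem_post is async-signal-safe.
+  }
+
+  static void MiniSampleOnce(pid_t aPid, pid_t aSampleeTid) {
+    sem_init(&gAck, /* pshared */ 0, 0);
+    struct sigaction sa;
+    memset(&sa, 0, sizeof(sa));
+    sa.sa_sigaction = MiniHandler;
+    sa.sa_flags = SA_RESTART | SA_SIGINFO;
+    sigemptyset(&sa.sa_mask);
+    sigaction(SIGPROF, &sa, nullptr);
+
+    tgkill(aPid, aSampleeTid, SIGPROF);  // message 1
+    while (sem_wait(&gAck) == -1 && errno == EINTR) {
+      // Retry if interrupted, exactly as the real loops below do.
+    }
+    // The samplee has now run its handler; a real sampler would inspect the
+    // copied-out context here, then post a "resume" semaphore (message 3).
+  }
+#endif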
+ + SigHandlerCoordinator coord; // on sampler thread's stack + sSigHandlerCoordinator = &coord; + + // Send message 1 to the samplee (the thread to be sampled), by + // signalling at it. + // This could fail if the thread doesn't exist anymore. + int r = tgkill(mMyPid, sampleeTid, SIGPROF); + if (r == 0) { + // Wait for message 2 from the samplee, indicating that the context + // is available and that the thread is suspended. + while (true) { + r = sem_wait(&sSigHandlerCoordinator->mMessage2); + if (r == -1 && errno == EINTR) { + // Interrupted by a signal. Try again. + continue; + } + // We don't expect any other kind of failure. + MOZ_ASSERT(r == 0); + break; + } + + //----------------------------------------------------------------// + // Sample the target thread. + + // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + // + // The profiler's "critical section" begins here. In the critical section, + // we must not do any dynamic memory allocation, nor try to acquire any lock + // or any other unshareable resource. This is because the thread to be + // sampled has been suspended at some entirely arbitrary point, and we have + // no idea which unsharable resources (locks, essentially) it holds. So any + // attempt to acquire any lock, including the implied locks used by the + // malloc implementation, risks deadlock. This includes TimeStamp::Now(), + // which gets a lock on Windows. + + // The samplee thread is now frozen and sSigHandlerCoordinator->mUContext is + // valid. We can poke around in it and unwind its stack as we like. + + // Extract the current register values. + Registers regs; + PopulateRegsFromContext(regs, &sSigHandlerCoordinator->mUContext); + aProcessRegs(regs, aNow); + + //----------------------------------------------------------------// + // Resume the target thread. + + // Send message 3 to the samplee, which tells it to resume. + r = sem_post(&sSigHandlerCoordinator->mMessage3); + MOZ_ASSERT(r == 0); + + // Wait for message 4 from the samplee, which tells us that it has + // finished with |sSigHandlerCoordinator|. + while (true) { + r = sem_wait(&sSigHandlerCoordinator->mMessage4); + if (r == -1 && errno == EINTR) { + continue; + } + MOZ_ASSERT(r == 0); + break; + } + + // The profiler's critical section ends here. After this point, none of the + // critical section limitations documented above apply. + // + // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + } + + // This isn't strictly necessary, but doing so does help pick up anomalies + // in which the signal handler is running when it shouldn't be. + sSigHandlerCoordinator = nullptr; +} + +// END Sampler target specifics +//////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////// +// BEGIN SamplerThread target specifics + +static void* ThreadEntry(void* aArg) { + auto thread = static_cast<SamplerThread*>(aArg); + thread->Run(); + return nullptr; +} + +SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration, + double aIntervalMilliseconds) + : mSampler(aLock), + mActivityGeneration(aActivityGeneration), + mIntervalMicroseconds( + std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))) { +#if defined(USE_LUL_STACKWALK) + lul::LUL* lul = CorePS::Lul(aLock); + if (!lul) { + CorePS::SetLul(aLock, MakeUnique<lul::LUL>(logging_sink_for_LUL)); + // Read all the unwind info currently available. 
+ lul = CorePS::Lul(aLock); + read_procmaps(lul); + + // Switch into unwind mode. After this point, we can't add or remove any + // unwind info to/from this LUL instance. The only thing we can do with + // it is Unwind() calls. + lul->EnableUnwinding(); + + // Has a test been requested? + if (PR_GetEnv("MOZ_PROFILER_LUL_TEST")) { + int nTests = 0, nTestsPassed = 0; + RunLulUnitTests(&nTests, &nTestsPassed, lul); + } + } +#endif + + // Start the sampling thread. It repeatedly sends a SIGPROF signal. Sending + // the signal ourselves instead of relying on itimer provides much better + // accuracy. + // + // At least 350 KiB of stack space are needed when built with TSAN. This + // includes lul::N_STACK_BYTES plus whatever else is needed for the sampler + // thread. Set the stack size to 800 KiB to keep a safe margin above that. + pthread_attr_t attr; + if (pthread_attr_init(&attr) != 0 || + pthread_attr_setstacksize(&attr, 800 * 1024) != 0 || + pthread_create(&mThread, &attr, ThreadEntry, this) != 0) { + MOZ_CRASH("pthread_create failed"); + } + pthread_attr_destroy(&attr); +} + +SamplerThread::~SamplerThread() { + pthread_join(mThread, nullptr); + // Just in the unlikely case some callbacks were added between the end of the + // thread and now. + InvokePostSamplingCallbacks(std::move(mPostSamplingCallbackList), + SamplingState::JustStopped); +} + +void SamplerThread::SleepMicro(uint32_t aMicroseconds) { + if (aMicroseconds >= 1000000) { + // Use usleep for larger intervals, because the nanosleep + // code below only supports intervals < 1 second. + MOZ_ALWAYS_TRUE(!::usleep(aMicroseconds)); + return; + } + + struct timespec ts; + ts.tv_sec = 0; + ts.tv_nsec = aMicroseconds * 1000UL; + + int rv = ::nanosleep(&ts, &ts); + + while (rv != 0 && errno == EINTR) { + // Keep waiting in case of interrupt. + // nanosleep puts the remaining time back into ts. + rv = ::nanosleep(&ts, &ts); + } + + MOZ_ASSERT(!rv, "nanosleep call failed"); +} + +void SamplerThread::Stop(PSLockRef aLock) { + // Restore old signal handler. This is global state so it's important that + // we do it now, while gPSMutex is locked. It's safe to do this now even + // though this SamplerThread is still alive, because the next time the main + // loop of Run() iterates it won't get past the mActivityGeneration check, + // and so won't send any signals. + mSampler.Disable(aLock); +} + +// END SamplerThread target specifics +//////////////////////////////////////////////////////////////////////// + +#if defined(GP_OS_linux) || defined(GP_OS_freebsd) + +// We use pthread_atfork() to temporarily disable signal delivery during any +// fork() call. Without that, fork() can be repeatedly interrupted by signal +// delivery, requiring it to be repeatedly restarted, which can lead to *long* +// delays. See bug 837390. +// +// We provide no paf_child() function to run in the child after forking. This +// is fine because we always immediately exec() after fork(), and exec() +// clobbers all process state. (At one point we did have a paf_child() +// function, but it caused problems related to locking gPSMutex. See bug +// 1348374.) +// +// Unfortunately all this is only doable on non-Android because Bionic doesn't +// have pthread_atfork. + +// In the parent, before the fork, record IsSamplingPaused, and then pause. 
+static void paf_prepare() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (ActivePS::Exists(lock)) { + ActivePS::SetWasSamplingPaused(lock, ActivePS::IsSamplingPaused(lock)); + ActivePS::SetIsSamplingPaused(lock, true); + } +} + +// In the parent, after the fork, return IsSamplingPaused to the pre-fork state. +static void paf_parent() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (ActivePS::Exists(lock)) { + ActivePS::SetIsSamplingPaused(lock, ActivePS::WasSamplingPaused(lock)); + ActivePS::SetWasSamplingPaused(lock, false); + } +} + +static void PlatformInit(PSLockRef aLock) { + // Set up the fork handlers. + pthread_atfork(paf_prepare, paf_parent, nullptr); +} + +#else + +static void PlatformInit(PSLockRef aLock) {} + +#endif + +#if defined(HAVE_NATIVE_UNWIND) +// Context used by synchronous samples. It's safe to have a single one because +// only one synchronous sample can be taken at a time (due to +// profiler_get_backtrace()'s PSAutoLock). +ucontext_t sSyncUContext; + +void Registers::SyncPopulate() { + if (!getcontext(&sSyncUContext)) { + PopulateRegsFromContext(*this, &sSyncUContext); + } +} +#endif diff --git a/tools/profiler/core/platform-macos.cpp b/tools/profiler/core/platform-macos.cpp new file mode 100644 index 0000000000..d0b44076c3 --- /dev/null +++ b/tools/profiler/core/platform-macos.cpp @@ -0,0 +1,279 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include <unistd.h> +#include <sys/mman.h> +#include <mach/mach_init.h> +#include <mach-o/getsect.h> + +#include <AvailabilityMacros.h> + +#include <pthread.h> +#include <semaphore.h> +#include <signal.h> +#include <libkern/OSAtomic.h> +#include <libproc.h> +#include <mach/mach.h> +#include <mach/semaphore.h> +#include <mach/task.h> +#include <mach/thread_act.h> +#include <mach/vm_statistics.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <sys/sysctl.h> +#include <stdarg.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <math.h> + +// this port is based off of v8 svn revision 9837 + +int profiler_current_process_id() { return getpid(); } + +int profiler_current_thread_id() { + return static_cast<int>(static_cast<pid_t>(syscall(SYS_thread_selfid))); +} + +void* GetStackTop(void* aGuess) { + pthread_t thread = pthread_self(); + return pthread_get_stackaddr_np(thread); +} + +class PlatformData { + public: + explicit PlatformData(int aThreadId) : mProfiledThread(mach_thread_self()) { + MOZ_COUNT_CTOR(PlatformData); + } + + ~PlatformData() { + // Deallocate Mach port for thread. + mach_port_deallocate(mach_task_self(), mProfiledThread); + + MOZ_COUNT_DTOR(PlatformData); + } + + thread_act_t ProfiledThread() const { return mProfiledThread; } + + RunningTimes& PreviousThreadRunningTimesRef() { + return mPreviousThreadRunningTimes; + } + + private: + // Note: for mProfiledThread Mach primitives are used instead of pthread's + // because the latter doesn't provide thread manipulation primitives required. + // For details, consult "Mac OS X Internals" book, Section 7.3. 
+ thread_act_t mProfiledThread; + RunningTimes mPreviousThreadRunningTimes; +}; + +//////////////////////////////////////////////////////////////////////// +// BEGIN Sampler target specifics + +Sampler::Sampler(PSLockRef aLock) {} + +void Sampler::Disable(PSLockRef aLock) {} + +static void StreamMetaPlatformSampleUnits(PSLockRef aLock, + SpliceableJSONWriter& aWriter) { + // Microseconds. + aWriter.StringProperty("threadCPUDelta", "\u00B5s"); +} + +static RunningTimes GetThreadRunningTimesDiff( + PSLockRef aLock, const RegisteredThread& aRegisteredThread) { + AUTO_PROFILER_STATS(GetRunningTimes); + + PlatformData* platformData = aRegisteredThread.GetPlatformData(); + MOZ_RELEASE_ASSERT(platformData); + + const RunningTimes newRunningTimes = GetRunningTimesWithTightTimestamp( + [platformData](RunningTimes& aRunningTimes) { + AUTO_PROFILER_STATS(GetRunningTimes_thread_info); + thread_basic_info_data_t threadBasicInfo; + mach_msg_type_number_t basicCount = THREAD_BASIC_INFO_COUNT; + if (thread_info(platformData->ProfiledThread(), THREAD_BASIC_INFO, + reinterpret_cast<thread_info_t>(&threadBasicInfo), + &basicCount) == KERN_SUCCESS && + basicCount == THREAD_BASIC_INFO_COUNT) { + uint64_t userTimeUs = + uint64_t(threadBasicInfo.user_time.seconds) * + uint64_t(USEC_PER_SEC) + + uint64_t(threadBasicInfo.user_time.microseconds); + uint64_t systemTimeUs = + uint64_t(threadBasicInfo.system_time.seconds) * + uint64_t(USEC_PER_SEC) + + uint64_t(threadBasicInfo.system_time.microseconds); + aRunningTimes.ResetThreadCPUDelta(userTimeUs + systemTimeUs); + } else { + aRunningTimes.ClearThreadCPUDelta(); + } + }); + + const RunningTimes diff = + newRunningTimes - platformData->PreviousThreadRunningTimesRef(); + platformData->PreviousThreadRunningTimesRef() = newRunningTimes; + return diff; +} + +template <typename Func> +void Sampler::SuspendAndSampleAndResumeThread( + PSLockRef aLock, const RegisteredThread& aRegisteredThread, + const TimeStamp& aNow, const Func& aProcessRegs) { + thread_act_t samplee_thread = + aRegisteredThread.GetPlatformData()->ProfiledThread(); + + //----------------------------------------------------------------// + // Suspend the samplee thread and get its context. + + // We're using thread_suspend on OS X because pthread_kill (which is what we + // at one time used on Linux) has less consistent performance and causes + // strange crashes, see bug 1166778 and bug 1166808. thread_suspend + // is also just a lot simpler to use. + + if (KERN_SUCCESS != thread_suspend(samplee_thread)) { + return; + } + + //----------------------------------------------------------------// + // Sample the target thread. + + // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + // + // The profiler's "critical section" begins here. We must be very careful + // what we do here, or risk deadlock. See the corresponding comment in + // platform-linux-android.cpp for details. 
+ +#if defined(__x86_64__) + thread_state_flavor_t flavor = x86_THREAD_STATE64; + x86_thread_state64_t state; + mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; +# if __DARWIN_UNIX03 +# define REGISTER_FIELD(name) __r##name +# else +# define REGISTER_FIELD(name) r##name +# endif // __DARWIN_UNIX03 +#elif defined(__aarch64__) + thread_state_flavor_t flavor = ARM_THREAD_STATE64; + arm_thread_state64_t state; + mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT; +# if __DARWIN_UNIX03 +# define REGISTER_FIELD(name) __##name +# else +# define REGISTER_FIELD(name) name +# endif // __DARWIN_UNIX03 +#else +# error "unknown architecture" +#endif + + if (thread_get_state(samplee_thread, flavor, + reinterpret_cast<natural_t*>(&state), + &count) == KERN_SUCCESS) { + Registers regs; +#if defined(__x86_64__) + regs.mPC = reinterpret_cast<Address>(state.REGISTER_FIELD(ip)); + regs.mSP = reinterpret_cast<Address>(state.REGISTER_FIELD(sp)); + regs.mFP = reinterpret_cast<Address>(state.REGISTER_FIELD(bp)); +#elif defined(__aarch64__) + regs.mPC = reinterpret_cast<Address>(state.REGISTER_FIELD(pc)); + regs.mSP = reinterpret_cast<Address>(state.REGISTER_FIELD(sp)); + regs.mFP = reinterpret_cast<Address>(state.REGISTER_FIELD(fp)); +#else +# error "unknown architecture" +#endif + regs.mLR = 0; + + aProcessRegs(regs, aNow); + } + +#undef REGISTER_FIELD + + //----------------------------------------------------------------// + // Resume the target thread. + + thread_resume(samplee_thread); + + // The profiler's critical section ends here. + // + // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING +} + +// END Sampler target specifics +//////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////// +// BEGIN SamplerThread target specifics + +static void* ThreadEntry(void* aArg) { + auto thread = static_cast<SamplerThread*>(aArg); + thread->Run(); + return nullptr; +} + +SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration, + double aIntervalMilliseconds) + : mSampler(aLock), + mActivityGeneration(aActivityGeneration), + mIntervalMicroseconds( + std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))), + mThread{nullptr} { + pthread_attr_t* attr_ptr = nullptr; + if (pthread_create(&mThread, attr_ptr, ThreadEntry, this) != 0) { + MOZ_CRASH("pthread_create failed"); + } +} + +SamplerThread::~SamplerThread() { + pthread_join(mThread, nullptr); + // Just in the unlikely case some callbacks were added between the end of the + // thread and now. + InvokePostSamplingCallbacks(std::move(mPostSamplingCallbackList), + SamplingState::JustStopped); +} + +void SamplerThread::SleepMicro(uint32_t aMicroseconds) { + usleep(aMicroseconds); + // FIXME: the OSX 10.12 page for usleep says "The usleep() function is + // obsolescent. Use nanosleep(2) instead." This implementation could be + // merged with the linux-android version. Also, this doesn't handle the + // case where the usleep call is interrupted by a signal. 
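+  //
+  // (Editorial aside, hedged sketch; not part of the original patch.) The
+  // nanosleep-based, EINTR-safe version suggested by the FIXME above could
+  // mirror the Linux implementation earlier in this patch, roughly:
+#if 0
+  struct timespec ts;
+  ts.tv_sec = aMicroseconds / 1000000;
+  ts.tv_nsec = (aMicroseconds % 1000000) * 1000UL;
+  int rv = ::nanosleep(&ts, &ts);
+  while (rv != 0 && errno == EINTR) {
+    // nanosleep writes the remaining time back into |ts|, so just retry.
+    rv = ::nanosleep(&ts, &ts);
+  }
+#endif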
+} + +void SamplerThread::Stop(PSLockRef aLock) { mSampler.Disable(aLock); } + +// END SamplerThread target specifics +//////////////////////////////////////////////////////////////////////// + +static void PlatformInit(PSLockRef aLock) {} + +#if defined(HAVE_NATIVE_UNWIND) +void Registers::SyncPopulate() { +# if defined(__x86_64__) + asm( + // Compute caller's %rsp by adding to %rbp: + // 8 bytes for previous %rbp, 8 bytes for return address + "leaq 0x10(%%rbp), %0\n\t" + // Dereference %rbp to get previous %rbp + "movq (%%rbp), %1\n\t" + : "=r"(mSP), "=r"(mFP)); +# elif defined(__aarch64__) + asm( + // Compute caller's sp by adding to fp: + // 8 bytes for previous fp, 8 bytes for return address + "add %0, x29, #0x10\n\t" + // Dereference fp to get previous fp + "ldr %1, [x29]\n\t" + : "=r"(mSP), "=r"(mFP)); +# else +# error "unknown architecture" +# endif + mPC = reinterpret_cast<Address>( + __builtin_extract_return_addr(__builtin_return_address(0))); + mLR = 0; +} +#endif diff --git a/tools/profiler/core/platform-win32.cpp b/tools/profiler/core/platform-win32.cpp new file mode 100644 index 0000000000..5b0b72aa59 --- /dev/null +++ b/tools/profiler/core/platform-win32.cpp @@ -0,0 +1,336 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google, Inc. nor the names of its contributors +// may be used to endorse or promote products derived from this +// software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. 
+ +#include <windows.h> +#include <mmsystem.h> +#include <process.h> + +#include "nsWindowsDllInterceptor.h" +#include "mozilla/StackWalk_windows.h" +#include "mozilla/WindowsVersion.h" + +int profiler_current_process_id() { return _getpid(); } + +int profiler_current_thread_id() { + DWORD threadId = GetCurrentThreadId(); + MOZ_ASSERT(threadId <= INT32_MAX, "native thread ID is > INT32_MAX"); + return int(threadId); +} + +void* GetStackTop(void* aGuess) { + PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb()); + return reinterpret_cast<void*>(pTib->StackBase); +} + +static void PopulateRegsFromContext(Registers& aRegs, CONTEXT* aContext) { +#if defined(GP_ARCH_amd64) + aRegs.mPC = reinterpret_cast<Address>(aContext->Rip); + aRegs.mSP = reinterpret_cast<Address>(aContext->Rsp); + aRegs.mFP = reinterpret_cast<Address>(aContext->Rbp); +#elif defined(GP_ARCH_x86) + aRegs.mPC = reinterpret_cast<Address>(aContext->Eip); + aRegs.mSP = reinterpret_cast<Address>(aContext->Esp); + aRegs.mFP = reinterpret_cast<Address>(aContext->Ebp); +#elif defined(GP_ARCH_arm64) + aRegs.mPC = reinterpret_cast<Address>(aContext->Pc); + aRegs.mSP = reinterpret_cast<Address>(aContext->Sp); + aRegs.mFP = reinterpret_cast<Address>(aContext->Fp); +#else +# error "bad arch" +#endif + aRegs.mLR = 0; +} + +// Gets a real (i.e. not pseudo) handle for the current thread, with the +// permissions needed for profiling. +// @return a real HANDLE for the current thread. +static HANDLE GetRealCurrentThreadHandleForProfiling() { + HANDLE realCurrentThreadHandle; + if (!::DuplicateHandle( + ::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(), + &realCurrentThreadHandle, + THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION, + FALSE, 0)) { + return nullptr; + } + + return realCurrentThreadHandle; +} + +class PlatformData { + public: + // Get a handle to the calling thread. This is the thread that we are + // going to profile. We need a real handle because we are going to use it in + // the sampler thread. 
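+  //
+  // (Editorial aside, hedged sketch; not part of the original patch.) The
+  // pseudo handle from GetCurrentThread() always means "the calling thread",
+  // so handing it to the sampler thread would make the sampler suspend
+  // itself; DuplicateHandle() above turns it into a real handle that names
+  // this thread from anywhere. The hypothetical check below is true on every
+  // thread, which is exactly the problem:
+#if 0
+  static bool PseudoHandleNamesCaller() {
+    return ::GetThreadId(::GetCurrentThread()) == ::GetCurrentThreadId();
+  }
+#endif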
+ explicit PlatformData(int aThreadId) + : mProfiledThread(GetRealCurrentThreadHandleForProfiling()) { + MOZ_ASSERT(aThreadId == ::GetCurrentThreadId()); + MOZ_COUNT_CTOR(PlatformData); + } + + ~PlatformData() { + if (mProfiledThread != nullptr) { + CloseHandle(mProfiledThread); + mProfiledThread = nullptr; + } + MOZ_COUNT_DTOR(PlatformData); + } + + HANDLE ProfiledThread() { return mProfiledThread; } + + RunningTimes& PreviousThreadRunningTimesRef() { + return mPreviousThreadRunningTimes; + } + + private: + HANDLE mProfiledThread; + RunningTimes mPreviousThreadRunningTimes; +}; + +#if defined(USE_MOZ_STACK_WALK) +HANDLE +GetThreadHandle(PlatformData* aData) { return aData->ProfiledThread(); } +#endif + +static const HANDLE kNoThread = INVALID_HANDLE_VALUE; + +//////////////////////////////////////////////////////////////////////// +// BEGIN Sampler target specifics + +Sampler::Sampler(PSLockRef aLock) {} + +void Sampler::Disable(PSLockRef aLock) {} + +static void StreamMetaPlatformSampleUnits(PSLockRef aLock, + SpliceableJSONWriter& aWriter) { + aWriter.StringProperty("threadCPUDelta", "variable CPU cycles"); +} + +static RunningTimes GetThreadRunningTimesDiff( + PSLockRef aLock, const RegisteredThread& aRegisteredThread) { + AUTO_PROFILER_STATS(GetRunningTimes); + + PlatformData* const platformData = aRegisteredThread.GetPlatformData(); + MOZ_RELEASE_ASSERT(platformData); + const HANDLE profiledThread = platformData->ProfiledThread(); + + const RunningTimes newRunningTimes = GetRunningTimesWithTightTimestamp( + [profiledThread](RunningTimes& aRunningTimes) { + AUTO_PROFILER_STATS(GetRunningTimes_QueryThreadCycleTime); + if (ULONG64 cycles; + QueryThreadCycleTime(profiledThread, &cycles) != 0) { + aRunningTimes.ResetThreadCPUDelta(cycles); + } else { + aRunningTimes.ClearThreadCPUDelta(); + } + }); + + const RunningTimes diff = + newRunningTimes - platformData->PreviousThreadRunningTimesRef(); + platformData->PreviousThreadRunningTimesRef() = newRunningTimes; + return diff; +} + +template <typename Func> +void Sampler::SuspendAndSampleAndResumeThread( + PSLockRef aLock, const RegisteredThread& aRegisteredThread, + const TimeStamp& aNow, const Func& aProcessRegs) { + HANDLE profiled_thread = + aRegisteredThread.GetPlatformData()->ProfiledThread(); + if (profiled_thread == nullptr) { + return; + } + + // Context used for sampling the register state of the profiled thread. + CONTEXT context; + memset(&context, 0, sizeof(context)); + + //----------------------------------------------------------------// + // Suspend the samplee thread and get its context. + + static const DWORD kSuspendFailed = static_cast<DWORD>(-1); + if (SuspendThread(profiled_thread) == kSuspendFailed) { + return; + } + + // SuspendThread is asynchronous, so the thread may still be running. + // Call GetThreadContext first to ensure the thread is really suspended. + // See https://blogs.msdn.microsoft.com/oldnewthing/20150205-00/?p=44743. + + // Using only CONTEXT_CONTROL is faster but on 64-bit it causes crashes in + // RtlVirtualUnwind (see bug 1120126) so we set all the flags. +#if defined(GP_ARCH_amd64) + context.ContextFlags = CONTEXT_FULL; +#else + context.ContextFlags = CONTEXT_CONTROL; +#endif + if (!GetThreadContext(profiled_thread, &context)) { + ResumeThread(profiled_thread); + return; + } + + //----------------------------------------------------------------// + // Sample the target thread. 
+ + // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + // + // The profiler's "critical section" begins here. We must be very careful + // what we do here, or risk deadlock. See the corresponding comment in + // platform-linux-android.cpp for details. + + Registers regs; + PopulateRegsFromContext(regs, &context); + aProcessRegs(regs, aNow); + + //----------------------------------------------------------------// + // Resume the target thread. + + ResumeThread(profiled_thread); + + // The profiler's critical section ends here. + // + // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING +} + +// END Sampler target specifics +//////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////// +// BEGIN SamplerThread target specifics + +static unsigned int __stdcall ThreadEntry(void* aArg) { + auto thread = static_cast<SamplerThread*>(aArg); + thread->Run(); + return 0; +} + +SamplerThread::SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration, + double aIntervalMilliseconds) + : mSampler(aLock), + mActivityGeneration(aActivityGeneration), + mIntervalMicroseconds( + std::max(1, int(floor(aIntervalMilliseconds * 1000 + 0.5)))) { + // By default we'll not adjust the timer resolution which tends to be + // around 16ms. However, if the requested interval is sufficiently low + // we'll try to adjust the resolution to match. + if (mIntervalMicroseconds < 10 * 1000) { + ::timeBeginPeriod(mIntervalMicroseconds / 1000); + } + + // Create a new thread. It is important to use _beginthreadex() instead of + // the Win32 function CreateThread(), because the CreateThread() does not + // initialize thread-specific structures in the C runtime library. + mThread = reinterpret_cast<HANDLE>(_beginthreadex(nullptr, + /* stack_size */ 0, + ThreadEntry, this, + /* initflag */ 0, nullptr)); + if (mThread == 0) { + MOZ_CRASH("_beginthreadex failed"); + } +} + +SamplerThread::~SamplerThread() { + WaitForSingleObject(mThread, INFINITE); + + // Close our own handle for the thread. + if (mThread != kNoThread) { + CloseHandle(mThread); + } + + // Just in the unlikely case some callbacks were added between the end of the + // thread and now. + InvokePostSamplingCallbacks(std::move(mPostSamplingCallbackList), + SamplingState::JustStopped); +} + +void SamplerThread::SleepMicro(uint32_t aMicroseconds) { + // For now, keep the old behaviour of minimum Sleep(1), even for + // smaller-than-usual sleeps after an overshoot, unless the user has + // explicitly opted into a sub-millisecond profiler interval. + if (mIntervalMicroseconds >= 1000) { + ::Sleep(std::max(1u, aMicroseconds / 1000)); + } else { + TimeStamp start = TimeStamp::NowUnfuzzed(); + TimeStamp end = start + TimeDuration::FromMicroseconds(aMicroseconds); + + // First, sleep for as many whole milliseconds as possible. + if (aMicroseconds >= 1000) { + ::Sleep(aMicroseconds / 1000); + } + + // Then, spin until enough time has passed. + while (TimeStamp::NowUnfuzzed() < end) { + YieldProcessor(); + } + } +} + +void SamplerThread::Stop(PSLockRef aLock) { + // Disable any timer resolution changes we've made. Do it now while + // gPSMutex is locked, i.e. before any other SamplerThread can be created + // and call ::timeBeginPeriod(). 
+ // + // It's safe to do this now even though this SamplerThread is still alive, + // because the next time the main loop of Run() iterates it won't get past + // the mActivityGeneration check, and so it won't make any more ::Sleep() + // calls. + if (mIntervalMicroseconds < 10 * 1000) { + ::timeEndPeriod(mIntervalMicroseconds / 1000); + } + + mSampler.Disable(aLock); +} + +// END SamplerThread target specifics +//////////////////////////////////////////////////////////////////////// + +static void PlatformInit(PSLockRef aLock) {} + +#if defined(HAVE_NATIVE_UNWIND) +void Registers::SyncPopulate() { + CONTEXT context; + RtlCaptureContext(&context); + PopulateRegsFromContext(*this, &context); +} +#endif + +#if defined(GP_PLAT_amd64_windows) + +// Use InitializeWin64ProfilerHooks from the base profiler. + +namespace mozilla { +namespace baseprofiler { +MFBT_API void InitializeWin64ProfilerHooks(); +} // namespace baseprofiler +} // namespace mozilla + +using mozilla::baseprofiler::InitializeWin64ProfilerHooks; + +#endif // defined(GP_PLAT_amd64_windows) diff --git a/tools/profiler/core/platform.cpp b/tools/profiler/core/platform.cpp new file mode 100644 index 0000000000..9b193535d9 --- /dev/null +++ b/tools/profiler/core/platform.cpp @@ -0,0 +1,5894 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// There are three kinds of samples done by the profiler. +// +// - A "periodic" sample is the most complex kind. It is done in response to a +// timer while the profiler is active. It involves writing a stack trace plus +// a variety of other values (memory measurements, responsiveness +// measurements, markers, etc.) into the main ProfileBuffer. The sampling is +// done from off-thread, and so SuspendAndSampleAndResumeThread() is used to +// get the register values. +// +// - A "synchronous" sample is a simpler kind. It is done in response to an API +// call (profiler_get_backtrace()). It involves writing a stack trace and +// little else into a temporary ProfileBuffer, and wrapping that up in a +// ProfilerBacktrace that can be subsequently used in a marker. The sampling +// is done on-thread, and so Registers::SyncPopulate() is used to get the +// register values. +// +// - A "backtrace" sample is the simplest kind. It is done in response to an +// API call (profiler_suspend_and_sample_thread()). It involves getting a +// stack trace via a ProfilerStackCollector; it does not write to a +// ProfileBuffer. The sampling is done from off-thread, and so uses +// SuspendAndSampleAndResumeThread() to get the register values. 
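+//
+// (Editorial aside, hedged sketch; not part of the original patch.) The two
+// externally triggered kinds correspond to the call sites sketched below;
+// the signatures are paraphrased and should be checked against the public
+// profiler header (GeckoProfiler.h).
+#if 0
+static void ExampleSampleCallSites(int aThreadId,
+                                   ProfilerStackCollector& aCollector) {
+  // "Synchronous" sample of the current thread, usable in a marker:
+  UniqueProfilerBacktrace backtrace = profiler_get_backtrace();
+
+  // "Backtrace" sample of another thread, collected through a
+  // caller-provided ProfilerStackCollector:
+  profiler_suspend_and_sample_thread(aThreadId, /* aFeatures */ 0, aCollector,
+                                     /* aSampleNative */ true);
+}
+#endif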
+ +#include "platform.h" + +#include "GeckoProfiler.h" +#include "GeckoProfilerReporter.h" +#include "PageInformation.h" +#include "ProfileBuffer.h" +#include "ProfiledThreadData.h" +#include "ProfilerBacktrace.h" +#include "ProfilerChild.h" +#include "ProfilerCodeAddressService.h" +#include "ProfilerIOInterposeObserver.h" +#include "ProfilerParent.h" +#include "RegisteredThread.h" +#include "shared-libraries.h" +#include "ThreadInfo.h" +#include "VTuneProfiler.h" + +#include "js/TraceLoggerAPI.h" +#include "js/ProfilingFrameIterator.h" +#include "memory_hooks.h" +#include "mozilla/ArrayUtils.h" +#include "mozilla/Atomics.h" +#include "mozilla/AutoProfilerLabel.h" +#include "mozilla/ExtensionPolicyService.h" +#include "mozilla/extensions/WebExtensionPolicy.h" +#include "mozilla/net/HttpBaseChannel.h" // for net::TimingStruct +#include "mozilla/Printf.h" +#include "mozilla/ProfileBufferChunkManagerSingle.h" +#include "mozilla/ProfileBufferChunkManagerWithLocalLimit.h" +#include "mozilla/ProfileChunkedBuffer.h" +#include "mozilla/SchedulerGroup.h" +#include "mozilla/Services.h" +#include "mozilla/StackWalk.h" +#include "mozilla/StaticPtr.h" +#include "mozilla/ThreadLocal.h" +#include "mozilla/TimeStamp.h" +#include "mozilla/Tuple.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/Vector.h" +#include "BaseProfiler.h" +#include "nsDirectoryServiceDefs.h" +#include "nsDirectoryServiceUtils.h" +#include "nsIDocShell.h" +#include "nsIHttpProtocolHandler.h" +#include "nsIObserverService.h" +#include "nsIPropertyBag2.h" +#include "nsIXULAppInfo.h" +#include "nsIXULRuntime.h" +#include "nsJSPrincipals.h" +#include "nsMemoryReporterManager.h" +#include "nsProfilerStartParams.h" +#include "nsScriptSecurityManager.h" +#include "nsSystemInfo.h" +#include "nsThreadUtils.h" +#include "nsXULAppAPI.h" +#include "Tracing.h" +#include "prdtoa.h" +#include "prtime.h" + +#include <algorithm> +#include <errno.h> +#include <fstream> +#include <ostream> +#include <set> +#include <sstream> +#include <type_traits> + +#ifdef MOZ_TASK_TRACER +# include "GeckoTaskTracer.h" +#endif + +#if defined(GP_OS_android) +# include "mozilla/java/GeckoJavaSamplerNatives.h" +#endif + +// Win32 builds always have frame pointers, so FramePointerStackWalk() always +// works. +#if defined(GP_PLAT_x86_windows) +# define HAVE_NATIVE_UNWIND +# define USE_FRAME_POINTER_STACK_WALK +#endif + +// Win64 builds always omit frame pointers, so we use the slower +// MozStackWalk(), which works in that case. +#if defined(GP_PLAT_amd64_windows) +# define HAVE_NATIVE_UNWIND +# define USE_MOZ_STACK_WALK +#endif + +// AArch64 Win64 doesn't seem to use frame pointers, so we use the slower +// MozStackWalk(). +#if defined(GP_PLAT_arm64_windows) +# define HAVE_NATIVE_UNWIND +# define USE_MOZ_STACK_WALK +#endif + +// Mac builds only have frame pointers when MOZ_PROFILING is specified, so +// FramePointerStackWalk() only works in that case. We don't use MozStackWalk() +// on Mac. +#if defined(GP_OS_darwin) && defined(MOZ_PROFILING) +# define HAVE_NATIVE_UNWIND +# define USE_FRAME_POINTER_STACK_WALK +#endif + +// Android builds use the ARM Exception Handling ABI to unwind. +#if defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android) +# define HAVE_NATIVE_UNWIND +# define USE_EHABI_STACKWALK +# include "EHABIStackWalk.h" +#endif + +// Linux/BSD builds use LUL, which uses DWARF info to unwind stacks. 
+#if defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_x86_linux) || \ + defined(GP_PLAT_amd64_android) || defined(GP_PLAT_x86_android) || \ + defined(GP_PLAT_mips64_linux) || defined(GP_PLAT_arm64_linux) || \ + defined(GP_PLAT_arm64_android) || defined(GP_PLAT_amd64_freebsd) || \ + defined(GP_PLAT_arm64_freebsd) +# define HAVE_NATIVE_UNWIND +# define USE_LUL_STACKWALK +# include "lul/LulMain.h" +# include "lul/platform-linux-lul.h" + +// On linux we use LUL for periodic samples and synchronous samples, but we use +// FramePointerStackWalk for backtrace samples when MOZ_PROFILING is enabled. +// (See the comment at the top of the file for a definition of +// periodic/synchronous/backtrace.). +// +// FramePointerStackWalk can produce incomplete stacks when the current entry is +// in a shared library without framepointers, however LUL can take a long time +// to initialize, which is undesirable for consumers of +// profiler_suspend_and_sample_thread like the Background Hang Reporter. +# if defined(MOZ_PROFILING) +# define USE_FRAME_POINTER_STACK_WALK +# endif +#endif + +// We can only stackwalk without expensive initialization on platforms which +// support FramePointerStackWalk or MozStackWalk. LUL Stackwalking requires +// initializing LUL, and EHABIStackWalk requires initializing EHABI, both of +// which can be expensive. +#if defined(USE_FRAME_POINTER_STACK_WALK) || defined(USE_MOZ_STACK_WALK) +# define HAVE_FASTINIT_NATIVE_UNWIND +#endif + +#ifdef MOZ_VALGRIND +# include <valgrind/memcheck.h> +#else +# define VALGRIND_MAKE_MEM_DEFINED(_addr, _len) ((void)0) +#endif + +#if defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd) +# include <ucontext.h> +#endif + +using namespace mozilla; +using mozilla::profiler::detail::RacyFeatures; + +LazyLogModule gProfilerLog("prof"); + +// Statically initialized to 0, then set once from profiler_init(), which should +// be called from the main thread before any other use of the profiler. +int scProfilerMainThreadId; + +#if defined(GP_OS_android) +class GeckoJavaSampler + : public java::GeckoJavaSampler::Natives<GeckoJavaSampler> { + private: + GeckoJavaSampler(); + + public: + static double GetProfilerTime() { + if (!profiler_is_active()) { + return 0.0; + } + return profiler_time(); + }; +}; +#endif + +constexpr static bool ValidateFeatures() { + int expectedFeatureNumber = 0; + + // Feature numbers should start at 0 and increase by 1 each. +#define CHECK_FEATURE(n_, str_, Name_, desc_) \ + if ((n_) != expectedFeatureNumber) { \ + return false; \ + } \ + ++expectedFeatureNumber; + + PROFILER_FOR_EACH_FEATURE(CHECK_FEATURE) + +#undef CHECK_FEATURE + + return true; +} + +static_assert(ValidateFeatures(), "Feature list is invalid"); + +// Return all features that are available on this platform. +static uint32_t AvailableFeatures() { + uint32_t features = 0; + +#define ADD_FEATURE(n_, str_, Name_, desc_) \ + ProfilerFeature::Set##Name_(features); + + // Add all the possible features. + PROFILER_FOR_EACH_FEATURE(ADD_FEATURE) + +#undef ADD_FEATURE + + // Now remove features not supported on this platform/configuration. 
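PROFILER_FOR_EACH_FEATURE above is an X-macro: a single list expands into the per-feature bits, their descriptions, and the compile-time numbering check in ValidateFeatures(). A minimal standalone sketch of the same pattern, with made-up feature names and without the description strings:

#include <cstdint>

#define MY_FOR_EACH_FEATURE(MACRO) \
  MACRO(0, Java)                   \
  MACRO(1, JS)                     \
  MACRO(2, Leaf)

struct MyFeature {
  // Expands to one bit constant per feature, numbered by the list.
#define MY_DECLARE(n_, Name_) static constexpr uint32_t Name_ = 1u << (n_);
  MY_FOR_EACH_FEATURE(MY_DECLARE)
#undef MY_DECLARE
};

// The same list, reused to verify at compile time that numbering stays dense.
constexpr bool MyValidateFeatures() {
  int expected = 0;
#define MY_CHECK(n_, Name_) \
  if ((n_) != expected) {   \
    return false;           \
  }                         \
  ++expected;
  MY_FOR_EACH_FEATURE(MY_CHECK)
#undef MY_CHECK
  return true;
}

static_assert(MyValidateFeatures(), "feature list must be densely numbered");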
+#if !defined(GP_OS_android) + ProfilerFeature::ClearJava(features); +#endif +#if !defined(HAVE_NATIVE_UNWIND) + ProfilerFeature::ClearStackWalk(features); +#endif +#if !defined(MOZ_TASK_TRACER) + ProfilerFeature::ClearTaskTracer(features); +#endif +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + if (getenv("XPCOM_MEM_BLOAT_LOG")) { + NS_WARNING("XPCOM_MEM_BLOAT_LOG is set, disabling native allocations."); + // The memory hooks are available, but the bloat log is enabled, which is + // not compatible with the native allocations tracking. See the comment in + // enable_native_allocations() (tools/profiler/core/memory_hooks.cpp) for + // more information. + ProfilerFeature::ClearNativeAllocations(features); + } +#else + // The memory hooks are not available. + ProfilerFeature::ClearNativeAllocations(features); +#endif + if (!JS::TraceLoggerSupported()) { + ProfilerFeature::ClearJSTracer(features); + } + + return features; +} + +// Default features common to all contexts (even if not available). +static uint32_t DefaultFeatures() { + return ProfilerFeature::Java | ProfilerFeature::JS | ProfilerFeature::Leaf | + ProfilerFeature::StackWalk | ProfilerFeature::Threads | + ProfilerFeature::Screenshots; +} + +// Extra default features when MOZ_PROFILER_STARTUP is set (even if not +// available). +static uint32_t StartupExtraDefaultFeatures() { + // Enable CPUUtilization by default for startup profiles as it is useful to + // see when startup alternates between CPU intensive tasks and being blocked. + // Enable file I/Os by default for startup profiles as startup is heavy on + // I/O operations. + return ProfilerFeature::CPUUtilization | ProfilerFeature::FileIOAll; +} + +// The class is a thin shell around mozglue PlatformMutex. It does not preserve +// behavior in JS record/replay. It provides a mechanism to determine if it is +// locked or not in order for memory hooks to avoid re-entering the profiler +// locked state. +class PSMutex : private ::mozilla::detail::MutexImpl { + public: + PSMutex() : ::mozilla::detail::MutexImpl() {} + + void Lock() { + const int tid = profiler_current_thread_id(); + MOZ_ASSERT(tid != 0); + + // This is only designed to catch recursive locking: + // - If the current thread doesn't own the mutex, `mOwningThreadId` must be + // zero or a different thread id written by another thread; it may change + // again at any time, but never to the current thread's id. + // - If the current thread owns the mutex, `mOwningThreadId` must be its id. + MOZ_ASSERT(mOwningThreadId != tid); + + ::mozilla::detail::MutexImpl::lock(); + + // We now hold the mutex, it should have been in the unlocked state before. + MOZ_ASSERT(mOwningThreadId == 0); + // And we can write our own thread id. + mOwningThreadId = tid; + } + + [[nodiscard]] bool TryLock() { + const int tid = profiler_current_thread_id(); + MOZ_ASSERT(tid != 0); + + // This is only designed to catch recursive locking: + // - If the current thread doesn't own the mutex, `mOwningThreadId` must be + // zero or a different thread id written by another thread; it may change + // again at any time, but never to the current thread's id. + // - If the current thread owns the mutex, `mOwningThreadId` must be its id. + MOZ_ASSERT(mOwningThreadId != tid); + + if (!::mozilla::detail::MutexImpl::tryLock()) { + // Failed to lock, nothing more to do. + return false; + } + + // We now hold the mutex, it should have been in the unlocked state before. + MOZ_ASSERT(mOwningThreadId == 0); + // And we can write our own thread id. 
+ mOwningThreadId = tid; + + return true; + } + + void Unlock() { + // This should never trigger! But check just in case something has gone + // very wrong (e.g., memory corruption). + AssertCurrentThreadOwns(); + + // We're still holding the mutex here, so it's safe to just reset + // `mOwningThreadId`. + mOwningThreadId = 0; + + ::mozilla::detail::MutexImpl::unlock(); + } + + // Does the current thread own this mutex? + // False positive or false negatives are not possible: + // - If `true`, the current thread owns the mutex, it has written its own + // `mOwningThreadId` when taking the lock, and no-one else can modify it + // until the current thread itself unlocks the mutex. + // - If `false`, the current thread does not own the mutex, therefore either + // `mOwningThreadId` is zero (unlocked), or it is a different thread id + // written by another thread, but it can never be the current thread's id + // until the current thread itself locks the mutex. + bool IsLockedOnCurrentThread() const { + return mOwningThreadId == profiler_current_thread_id(); + } + + void AssertCurrentThreadOwns() const { + MOZ_ASSERT(IsLockedOnCurrentThread()); + } + + void AssertCurrentThreadDoesNotOwn() const { + MOZ_ASSERT(!IsLockedOnCurrentThread()); + } + + private: + // Zero when unlocked, or the thread id of the owning thread. + // This should only be used to compare with the current thread id; any other + // number (0 or other id) could change at any time because the current thread + // wouldn't own the lock. + Atomic<int, MemoryOrdering::SequentiallyConsistent> mOwningThreadId{0}; +}; + +// RAII class to lock the profiler mutex. +class MOZ_RAII PSAutoLock { + public: + explicit PSAutoLock(PSMutex& aMutex) : mMutex(aMutex) { mMutex.Lock(); } + ~PSAutoLock() { mMutex.Unlock(); } + + private: + // Allow PSAutoTryLock to call the following `PSAutoLock(PSMutex&, int)` + // constructor through `Maybe<const PSAutoLock>::emplace()`. + friend class Maybe<const PSAutoLock>; + + // Special constructor taking an already-locked mutex. The `int` parameter is + // necessary to distinguish it from the main constructor. + PSAutoLock(PSMutex& aAlreadyLockedMutex, int) : mMutex(aAlreadyLockedMutex) { + mMutex.AssertCurrentThreadOwns(); + } + + PSMutex& mMutex; +}; + +// RAII class that attempts to lock the profiler mutex. Example usage: +// PSAutoTryLock tryLock(gPSMutex); +// if (tryLock.IsLocked()) { locked_foo(tryLock.LockRef()); } +class MOZ_RAII PSAutoTryLock { + public: + explicit PSAutoTryLock(PSMutex& aMutex) { + if (aMutex.TryLock()) { + mMaybePSAutoLock.emplace(aMutex, 0); + } + } + + // Return true if the mutex was aquired and locked. + [[nodiscard]] bool IsLocked() const { return mMaybePSAutoLock.isSome(); } + + // Assuming the mutex is locked, return a reference to a `PSAutoLock` for that + // mutex, which can be passed as proof-of-lock. + [[nodiscard]] const PSAutoLock& LockRef() const { + MOZ_ASSERT(IsLocked()); + return mMaybePSAutoLock.ref(); + } + + private: + // `mMaybePSAutoLock` is `Nothing` if locking failed, otherwise it contains a + // `const PSAutoLock` holding the locked mutex, and whose reference may be + // passed to functions expecting a proof-of-lock. + Maybe<const PSAutoLock> mMaybePSAutoLock; +}; + +// Only functions that take a PSLockRef arg can access CorePS's and ActivePS's +// fields. 
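PSMutex and PSAutoLock above combine two ideas: an atomic owning-thread id that catches accidental recursive locking, and a "proof of lock" parameter (PSLockRef, defined next) so that code which needs the lock can only be called while it is held. A minimal standalone sketch of both ideas using standard-library types; the names here are illustrative, not the Gecko classes:

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

class Guarded {
 public:
  // RAII guard; a const reference to it acts as proof that the mutex is held.
  class AutoLock {
   public:
    explicit AutoLock(Guarded& aGuarded) : mGuarded(aGuarded) {
      // Catch accidental recursive locking, like PSMutex::Lock() does.
      assert(mGuarded.mOwner.load() != std::this_thread::get_id());
      mGuarded.mMutex.lock();
      mGuarded.mOwner.store(std::this_thread::get_id());
    }
    ~AutoLock() {
      mGuarded.mOwner.store(std::thread::id{});
      mGuarded.mMutex.unlock();
    }
    AutoLock(const AutoLock&) = delete;
    AutoLock& operator=(const AutoLock&) = delete;

   private:
    Guarded& mGuarded;
  };

  // Only callable with the lock held; forgetting to lock is a compile error.
  int Value(const AutoLock&) const { return mValue; }
  void SetValue(const AutoLock&, int aValue) { mValue = aValue; }

 private:
  std::mutex mMutex;
  std::atomic<std::thread::id> mOwner{};
  int mValue = 0;
};

// Usage:
//   Guarded guarded;
//   Guarded::AutoLock lock(guarded);
//   guarded.SetValue(lock, 42);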
+typedef const PSAutoLock& PSLockRef; + +#define PS_GET(type_, name_) \ + static type_ name_(PSLockRef) { \ + MOZ_ASSERT(sInstance); \ + return sInstance->m##name_; \ + } + +#define PS_GET_LOCKLESS(type_, name_) \ + static type_ name_() { \ + MOZ_ASSERT(sInstance); \ + return sInstance->m##name_; \ + } + +#define PS_GET_AND_SET(type_, name_) \ + PS_GET(type_, name_) \ + static void Set##name_(PSLockRef, type_ a##name_) { \ + MOZ_ASSERT(sInstance); \ + sInstance->m##name_ = a##name_; \ + } + +static const size_t MAX_JS_FRAMES = 1024; +using JsFrameBuffer = JS::ProfilingFrameIterator::Frame[MAX_JS_FRAMES]; + +// All functions in this file can run on multiple threads unless they have an +// NS_IsMainThread() assertion. + +// This class contains the profiler's core global state, i.e. that which is +// valid even when the profiler is not active. Most profile operations can't do +// anything useful when this class is not instantiated, so we release-assert +// its non-nullness in all such operations. +// +// Accesses to CorePS are guarded by gPSMutex. Getters and setters take a +// PSAutoLock reference as an argument as proof that the gPSMutex is currently +// locked. This makes it clear when gPSMutex is locked and helps avoid +// accidental unlocked accesses to global state. There are ways to circumvent +// this mechanism, but please don't do so without *very* good reason and a +// detailed explanation. +// +// The exceptions to this rule: +// +// - mProcessStartTime, because it's immutable; +// +// - each thread's RacyRegisteredThread object is accessible without locking via +// TLSRegisteredThread::RacyRegisteredThread(). +class CorePS { + private: + CorePS() + : mProcessStartTime(TimeStamp::ProcessCreation()), + // This needs its own mutex, because it is used concurrently from + // functions guarded by gPSMutex as well as others without safety (e.g., + // profiler_add_marker). It is *not* used inside the critical section of + // the sampler, because mutexes cannot be used there. + mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex) +#ifdef USE_LUL_STACKWALK + , + mLul(nullptr) +#endif + { + MOZ_ASSERT(NS_IsMainThread(), + "CorePS must be created from the main thread"); + } + + ~CorePS() {} + + public: + static void Create(PSLockRef aLock) { + MOZ_ASSERT(!sInstance); + sInstance = new CorePS(); + } + + static void Destroy(PSLockRef aLock) { + MOZ_ASSERT(sInstance); + delete sInstance; + sInstance = nullptr; + } + + // Unlike ActivePS::Exists(), CorePS::Exists() can be called without gPSMutex + // being locked. This is because CorePS is instantiated so early on the main + // thread that we don't have to worry about it being racy. 
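For readability, this is roughly what a later use such as PS_GET_AND_SET(bool, IsPaused) in ActivePS expands to (hand-expanded here for illustration; it is not additional code in the patch):

static bool IsPaused(PSLockRef) {
  MOZ_ASSERT(sInstance);
  return sInstance->mIsPaused;
}
static void SetIsPaused(PSLockRef, bool aIsPaused) {
  MOZ_ASSERT(sInstance);
  sInstance->mIsPaused = aIsPaused;
}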
+ static bool Exists() { return !!sInstance; } + + static void AddSizeOf(PSLockRef, MallocSizeOf aMallocSizeOf, + size_t& aProfSize, size_t& aLulSize) { + MOZ_ASSERT(sInstance); + + aProfSize += aMallocSizeOf(sInstance); + + for (auto& registeredThread : sInstance->mRegisteredThreads) { + aProfSize += registeredThread->SizeOfIncludingThis(aMallocSizeOf); + } + + for (auto& registeredPage : sInstance->mRegisteredPages) { + aProfSize += registeredPage->SizeOfIncludingThis(aMallocSizeOf); + } + + // Measurement of the following things may be added later if DMD finds it + // is worthwhile: + // - CorePS::mRegisteredThreads itself (its elements' children are + // measured above) + // - CorePS::mRegisteredPages itself (its elements' children are + // measured above) + // - CorePS::mInterposeObserver + +#if defined(USE_LUL_STACKWALK) + if (sInstance->mLul) { + aLulSize += sInstance->mLul->SizeOfIncludingThis(aMallocSizeOf); + } +#endif + } + + // No PSLockRef is needed for this field because it's immutable. + PS_GET_LOCKLESS(TimeStamp, ProcessStartTime) + + // No PSLockRef is needed for this field because it's thread-safe. + PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer) + + PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads) + + PS_GET(JsFrameBuffer&, JsFrames) + + static void AppendRegisteredThread( + PSLockRef, UniquePtr<RegisteredThread>&& aRegisteredThread) { + MOZ_ASSERT(sInstance); + MOZ_RELEASE_ASSERT( + sInstance->mRegisteredThreads.append(std::move(aRegisteredThread))); + } + + static void RemoveRegisteredThread(PSLockRef, + RegisteredThread* aRegisteredThread) { + MOZ_ASSERT(sInstance); + // Remove aRegisteredThread from mRegisteredThreads. + for (UniquePtr<RegisteredThread>& rt : sInstance->mRegisteredThreads) { + if (rt.get() == aRegisteredThread) { + sInstance->mRegisteredThreads.erase(&rt); + return; + } + } + } + + PS_GET(Vector<RefPtr<PageInformation>>&, RegisteredPages) + + static void AppendRegisteredPage(PSLockRef, + RefPtr<PageInformation>&& aRegisteredPage) { + MOZ_ASSERT(sInstance); + struct RegisteredPageComparator { + PageInformation* aA; + bool operator()(PageInformation* aB) const { return aA->Equals(aB); } + }; + + auto foundPageIter = std::find_if( + sInstance->mRegisteredPages.begin(), sInstance->mRegisteredPages.end(), + RegisteredPageComparator{aRegisteredPage.get()}); + + if (foundPageIter != sInstance->mRegisteredPages.end()) { + if ((*foundPageIter)->Url().EqualsLiteral("about:blank")) { + // When a BrowsingContext is loaded, the first url loaded in it will be + // about:blank, and if the principal matches, the first document loaded + // in it will share an inner window. That's why we should delete the + // intermittent about:blank if they share the inner window. + sInstance->mRegisteredPages.erase(foundPageIter); + } else { + // Do not register the same page again. + return; + } + } + + MOZ_RELEASE_ASSERT( + sInstance->mRegisteredPages.append(std::move(aRegisteredPage))); + } + + static void RemoveRegisteredPage(PSLockRef, + uint64_t aRegisteredInnerWindowID) { + MOZ_ASSERT(sInstance); + // Remove RegisteredPage from mRegisteredPages by given inner window ID. 
+ sInstance->mRegisteredPages.eraseIf([&](const RefPtr<PageInformation>& rd) { + return rd->InnerWindowID() == aRegisteredInnerWindowID; + }); + } + + static void ClearRegisteredPages(PSLockRef) { + MOZ_ASSERT(sInstance); + sInstance->mRegisteredPages.clear(); + } + + PS_GET(const Vector<BaseProfilerCount*>&, Counters) + + static void AppendCounter(PSLockRef, BaseProfilerCount* aCounter) { + MOZ_ASSERT(sInstance); + // we don't own the counter; they may be stored in static objects + MOZ_RELEASE_ASSERT(sInstance->mCounters.append(aCounter)); + } + + static void RemoveCounter(PSLockRef, BaseProfilerCount* aCounter) { + // we may be called to remove a counter after the profiler is stopped or + // late in shutdown. + if (sInstance) { + auto* counter = std::find(sInstance->mCounters.begin(), + sInstance->mCounters.end(), aCounter); + MOZ_RELEASE_ASSERT(counter != sInstance->mCounters.end()); + sInstance->mCounters.erase(counter); + } + } + +#ifdef USE_LUL_STACKWALK + static lul::LUL* Lul(PSLockRef) { + MOZ_ASSERT(sInstance); + return sInstance->mLul.get(); + } + static void SetLul(PSLockRef, UniquePtr<lul::LUL> aLul) { + MOZ_ASSERT(sInstance); + sInstance->mLul = std::move(aLul); + } +#endif + + PS_GET_AND_SET(const nsACString&, ProcessName) + PS_GET_AND_SET(const nsACString&, ETLDplus1) + + private: + // The singleton instance + static CorePS* sInstance; + + // The time that the process started. + const TimeStamp mProcessStartTime; + + // The thread-safe blocks-oriented buffer into which all profiling data is + // recorded. + // ActivePS controls the lifetime of the underlying contents buffer: When + // ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes; + // see ActivePS for further details. + // Note: This needs to live here outside of ActivePS, because some producers + // are indirectly controlled (e.g., by atomic flags) and therefore may still + // attempt to write some data shortly after ActivePS has shutdown and deleted + // the underlying buffer in memory. + ProfileChunkedBuffer mCoreBuffer; + + // Info on all the registered threads. + // ThreadIds in mRegisteredThreads are unique. + Vector<UniquePtr<RegisteredThread>> mRegisteredThreads; + + // Info on all the registered pages. + // InnerWindowIDs in mRegisteredPages are unique. + Vector<RefPtr<PageInformation>> mRegisteredPages; + + // Non-owning pointers to all active counters + Vector<BaseProfilerCount*> mCounters; + +#ifdef USE_LUL_STACKWALK + // LUL's state. Null prior to the first activation, non-null thereafter. + UniquePtr<lul::LUL> mLul; +#endif + + // Process name, provided by child process initialization code. + nsAutoCString mProcessName; + // Private name, provided by child process initialization code (eTLD+1 in + // fission) + nsAutoCString mETLDplus1; + + // This memory buffer is used by the MergeStacks mechanism. Previously it was + // stack allocated, but this led to a stack overflow, as it was too much + // memory. Here the buffer can be pre-allocated, and shared with the + // MergeStacks feature as needed. MergeStacks is only run while holding the + // lock, so it is safe to have only one instance allocated for all of the + // threads. 
+ JsFrameBuffer mJsFrames; +}; + +CorePS* CorePS::sInstance = nullptr; + +ProfileChunkedBuffer& profiler_get_core_buffer() { + MOZ_ASSERT(CorePS::Exists()); + return CorePS::CoreBuffer(); +} + +class SamplerThread; + +static SamplerThread* NewSamplerThread(PSLockRef aLock, uint32_t aGeneration, + double aInterval); + +struct LiveProfiledThreadData { + RegisteredThread* mRegisteredThread; + UniquePtr<ProfiledThreadData> mProfiledThreadData; +}; + +// The buffer size is provided as a number of "entries", this is their size in +// bytes. +constexpr static uint32_t scBytesPerEntry = 8; + +// This class contains the profiler's global state that is valid only when the +// profiler is active. When not instantiated, the profiler is inactive. +// +// Accesses to ActivePS are guarded by gPSMutex, in much the same fashion as +// CorePS. +// +class ActivePS { + private: + // We need to decide how many chunks of what size we want to fit in the given + // total maximum capacity for this process, in the (likely) context of + // multiple processes doing the same choice and having an inter-process + // mechanism to control the overal memory limit. + + // Minimum chunk size allowed, enough for at least one stack. + constexpr static uint32_t scMinimumChunkSize = + 2 * ProfileBufferChunkManager::scExpectedMaximumStackSize; + + // Ideally we want at least 2 unreleased chunks to work with (1 current and 1 + // next), and 2 released chunks (so that one can be recycled when old, leaving + // one with some data). + constexpr static uint32_t scMinimumNumberOfChunks = 4; + + // And we want to limit chunks to a maximum size, which is a compromise + // between: + // - A big size, which helps with reducing the rate of allocations and IPCs. + // - A small size, which helps with equalizing the duration of recorded data + // (as the inter-process controller will discard the oldest chunks in all + // Firefox processes). + constexpr static uint32_t scMaximumChunkSize = 1024 * 1024; + + public: + // We should be able to store at least the minimum number of the smallest- + // possible chunks. + constexpr static uint32_t scMinimumBufferSize = + scMinimumNumberOfChunks * scMinimumChunkSize; + // Note: Keep in sync with GeckoThread.maybeStartGeckoProfiler: + // https://searchfox.org/mozilla-central/source/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoThread.java + constexpr static uint32_t scMinimumBufferEntries = + scMinimumBufferSize / scBytesPerEntry; + + // Limit to 2GiB. + constexpr static uint32_t scMaximumBufferSize = 2u * 1024u * 1024u * 1024u; + constexpr static uint32_t scMaximumBufferEntries = + scMaximumBufferSize / scBytesPerEntry; + + constexpr static uint32_t ClampToAllowedEntries(uint32_t aEntries) { + if (aEntries <= scMinimumBufferEntries) { + return scMinimumBufferEntries; + } + if (aEntries >= scMaximumBufferEntries) { + return scMaximumBufferEntries; + } + return aEntries; + } + + private: + constexpr static uint32_t ChunkSizeForEntries(uint32_t aEntries) { + return uint32_t(std::min(size_t(ClampToAllowedEntries(aEntries)) * + scBytesPerEntry / scMinimumNumberOfChunks, + size_t(scMaximumChunkSize))); + } + + static uint32_t AdjustFeatures(uint32_t aFeatures, uint32_t aFilterCount) { + // Filter out any features unavailable in this platform/configuration. + aFeatures &= AvailableFeatures(); + + // Always enable ProfilerFeature::Threads if we have a filter, because + // users sometimes ask to filter by a list of threads but forget to + // explicitly specify ProfilerFeature::Threads. 
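As a concrete illustration of the sizing constants above (scBytesPerEntry = 8, scMinimumNumberOfChunks = 4, scMaximumChunkSize = 1 MiB), here is the ChunkSizeForEntries() arithmetic restated as a standalone sketch with a worked value; the entry-count clamping is omitted because scMinimumChunkSize depends on a constant defined in ProfileBufferChunkManager:

#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr uint32_t kBytesPerEntry = 8;
constexpr uint32_t kMinimumNumberOfChunks = 4;
constexpr uint32_t kMaximumChunkSize = 1024 * 1024;

uint32_t ChunkSizeForEntries(uint32_t aEntries) {
  return uint32_t(std::min<uint64_t>(
      uint64_t(aEntries) * kBytesPerEntry / kMinimumNumberOfChunks,
      kMaximumChunkSize));
}

int main() {
  // 2^20 entries = 8 MiB of buffer; split over 4 chunks that would be 2 MiB
  // per chunk, which is clamped down to the 1 MiB maximum chunk size.
  std::printf("%u\n", ChunkSizeForEntries(1u << 20));  // prints 1048576
  return 0;
}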
+ if (aFilterCount > 0) { + aFeatures |= ProfilerFeature::Threads; + } + + // Some features imply others. + if (aFeatures & ProfilerFeature::FileIOAll) { + aFeatures |= ProfilerFeature::MainThreadIO | ProfilerFeature::FileIO; + } else if (aFeatures & ProfilerFeature::FileIO) { + aFeatures |= ProfilerFeature::MainThreadIO; + } + + return aFeatures; + } + + ActivePS(PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval, + uint32_t aFeatures, const char** aFilters, uint32_t aFilterCount, + uint64_t aActiveBrowsingContextID, const Maybe<double>& aDuration) + : mGeneration(sNextGeneration++), + mCapacity(aCapacity), + mDuration(aDuration), + mInterval(aInterval), + mFeatures(AdjustFeatures(aFeatures, aFilterCount)), + mActiveBrowsingContextID(aActiveBrowsingContextID), + mProfileBufferChunkManager( + size_t(ClampToAllowedEntries(aCapacity.Value())) * scBytesPerEntry, + ChunkSizeForEntries(aCapacity.Value())), + mProfileBuffer([this]() -> ProfileChunkedBuffer& { + CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager); + return CorePS::CoreBuffer(); + }()), + // The new sampler thread doesn't start sampling immediately because the + // main loop within Run() is blocked until this function's caller + // unlocks gPSMutex. + mSamplerThread(NewSamplerThread(aLock, mGeneration, aInterval)), + mInterposeObserver((ProfilerFeature::HasMainThreadIO(aFeatures) || + ProfilerFeature::HasFileIO(aFeatures) || + ProfilerFeature::HasFileIOAll(aFeatures)) + ? new ProfilerIOInterposeObserver() + : nullptr), + mIsPaused(false), + mIsSamplingPaused(false) +#if defined(GP_OS_linux) || defined(GP_OS_freebsd) + , + mWasSamplingPaused(false) +#endif + { + // Deep copy aFilters. + MOZ_ALWAYS_TRUE(mFilters.resize(aFilterCount)); + for (uint32_t i = 0; i < aFilterCount; ++i) { + mFilters[i] = aFilters[i]; + } + +#if !defined(RELEASE_OR_BETA) + if (mInterposeObserver) { + // We need to register the observer on the main thread, because we want + // to observe IO that happens on the main thread. + // IOInterposer needs to be initialized before calling + // IOInterposer::Register or our observer will be silently dropped. + if (NS_IsMainThread()) { + IOInterposer::Init(); + IOInterposer::Register(IOInterposeObserver::OpAll, mInterposeObserver); + } else { + RefPtr<ProfilerIOInterposeObserver> observer = mInterposeObserver; + NS_DispatchToMainThread( + NS_NewRunnableFunction("ActivePS::ActivePS", [=]() { + IOInterposer::Init(); + IOInterposer::Register(IOInterposeObserver::OpAll, observer); + })); + } + } +#endif + } + + ~ActivePS() { +#if !defined(RELEASE_OR_BETA) + if (mInterposeObserver) { + // We need to unregister the observer on the main thread, because that's + // where we've registered it. 
+ if (NS_IsMainThread()) { + IOInterposer::Unregister(IOInterposeObserver::OpAll, + mInterposeObserver); + } else { + RefPtr<ProfilerIOInterposeObserver> observer = mInterposeObserver; + NS_DispatchToMainThread( + NS_NewRunnableFunction("ActivePS::~ActivePS", [=]() { + IOInterposer::Unregister(IOInterposeObserver::OpAll, observer); + })); + } + } +#endif + CorePS::CoreBuffer().ResetChunkManager(); + } + + bool ThreadSelected(const char* aThreadName) { + if (mFilters.empty()) { + return true; + } + + std::string name = aThreadName; + std::transform(name.begin(), name.end(), name.begin(), ::tolower); + + for (uint32_t i = 0; i < mFilters.length(); ++i) { + std::string filter = mFilters[i]; + + if (filter == "*") { + return true; + } + + std::transform(filter.begin(), filter.end(), filter.begin(), ::tolower); + + // Crude, non UTF-8 compatible, case insensitive substring search + if (name.find(filter) != std::string::npos) { + return true; + } + + // If the filter starts with pid:, check for a pid match + if (filter.find("pid:") == 0) { + std::string mypid = std::to_string(profiler_current_process_id()); + if (filter.compare(4, std::string::npos, mypid) == 0) { + return true; + } + } + } + + return false; + } + + public: + static void Create(PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval, + uint32_t aFeatures, const char** aFilters, + uint32_t aFilterCount, uint64_t aActiveBrowsingContextID, + const Maybe<double>& aDuration) { + MOZ_ASSERT(!sInstance); + sInstance = new ActivePS(aLock, aCapacity, aInterval, aFeatures, aFilters, + aFilterCount, aActiveBrowsingContextID, aDuration); + } + + [[nodiscard]] static SamplerThread* Destroy(PSLockRef aLock) { + MOZ_ASSERT(sInstance); + auto samplerThread = sInstance->mSamplerThread; + delete sInstance; + sInstance = nullptr; + + return samplerThread; + } + + static bool Exists(PSLockRef) { return !!sInstance; } + + static bool Equals(PSLockRef, PowerOfTwo32 aCapacity, + const Maybe<double>& aDuration, double aInterval, + uint32_t aFeatures, const char** aFilters, + uint32_t aFilterCount, uint64_t aActiveBrowsingContextID) { + MOZ_ASSERT(sInstance); + if (sInstance->mCapacity != aCapacity || + sInstance->mDuration != aDuration || + sInstance->mInterval != aInterval || + sInstance->mFeatures != aFeatures || + sInstance->mFilters.length() != aFilterCount || + sInstance->mActiveBrowsingContextID != aActiveBrowsingContextID) { + return false; + } + + for (uint32_t i = 0; i < sInstance->mFilters.length(); ++i) { + if (strcmp(sInstance->mFilters[i].c_str(), aFilters[i]) != 0) { + return false; + } + } + return true; + } + + static size_t SizeOf(PSLockRef, MallocSizeOf aMallocSizeOf) { + MOZ_ASSERT(sInstance); + + size_t n = aMallocSizeOf(sInstance); + + n += sInstance->mProfileBuffer.SizeOfExcludingThis(aMallocSizeOf); + + // Measurement of the following members may be added later if DMD finds it + // is worthwhile: + // - mLiveProfiledThreads (both the array itself, and the contents) + // - mDeadProfiledThreads (both the array itself, and the contents) + // + + return n; + } + + static bool ShouldProfileThread(PSLockRef aLock, ThreadInfo* aInfo) { + MOZ_ASSERT(sInstance); + return ((aInfo->IsMainThread() || FeatureThreads(aLock)) && + sInstance->ThreadSelected(aInfo->Name())); + } + + [[nodiscard]] static bool AppendPostSamplingCallback( + PSLockRef, PostSamplingCallback&& aCallback); + + // Writes out the current active configuration of the profile. 
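ThreadSelected() above decides which threads are profiled based on the user-supplied filters: "*" matches everything, "pid:<n>" matches the current process id, and anything else is a crude case-insensitive substring match on the thread name. A standalone sketch of those rules, with the checks slightly reordered for brevity:

#include <algorithm>
#include <cctype>
#include <string>

static std::string Lowercase(std::string aText) {
  std::transform(aText.begin(), aText.end(), aText.begin(),
                 [](unsigned char aChar) { return char(std::tolower(aChar)); });
  return aText;
}

bool MatchesFilter(const std::string& aThreadName, const std::string& aFilter,
                   int aCurrentPid) {
  if (aFilter == "*") {
    return true;
  }
  if (aFilter.rfind("pid:", 0) == 0) {
    // Compare everything after "pid:" with this process's id.
    return aFilter.compare(4, std::string::npos,
                           std::to_string(aCurrentPid)) == 0;
  }
  // Crude, non-UTF-8-aware, case-insensitive substring match.
  return Lowercase(aThreadName).find(Lowercase(aFilter)) != std::string::npos;
}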
+ static void WriteActiveConfiguration( + PSLockRef aLock, JSONWriter& aWriter, + const Span<const char>& aPropertyName = MakeStringSpan("")) { + if (!sInstance) { + if (!aPropertyName.empty()) { + aWriter.NullProperty(aPropertyName); + } else { + aWriter.NullElement(); + } + return; + }; + + if (!aPropertyName.empty()) { + aWriter.StartObjectProperty(aPropertyName); + } else { + aWriter.StartObjectElement(); + } + + { + aWriter.StartArrayProperty("features", aWriter.SingleLineStyle); +#define WRITE_ACTIVE_FEATURES(n_, str_, Name_, desc_) \ + if (profiler_feature_active(ProfilerFeature::Name_)) { \ + aWriter.StringElement(str_); \ + } + + PROFILER_FOR_EACH_FEATURE(WRITE_ACTIVE_FEATURES) +#undef WRITE_ACTIVE_FEATURES + aWriter.EndArray(); + } + { + aWriter.StartArrayProperty("threads", aWriter.SingleLineStyle); + for (const auto& filter : sInstance->mFilters) { + aWriter.StringElement(filter); + } + aWriter.EndArray(); + } + { + // Now write all the simple values. + + // The interval is also available on profile.meta.interval + aWriter.DoubleProperty("interval", sInstance->mInterval); + aWriter.IntProperty("capacity", sInstance->mCapacity.Value()); + if (sInstance->mDuration) { + aWriter.DoubleProperty("duration", sInstance->mDuration.value()); + } + // Here, we are converting uint64_t to double. Browsing Context IDs are + // being created using `nsContentUtils::GenerateProcessSpecificId`, which + // is specifically designed to only use 53 of the 64 bits to be lossless + // when passed into and out of JS as a double. + aWriter.DoubleProperty("activeBrowsingContextID", + sInstance->mActiveBrowsingContextID); + } + aWriter.EndObject(); + } + + PS_GET(uint32_t, Generation) + + PS_GET(PowerOfTwo32, Capacity) + + PS_GET(Maybe<double>, Duration) + + PS_GET(double, Interval) + + PS_GET(uint32_t, Features) + + PS_GET(uint64_t, ActiveBrowsingContextID) + +#define PS_GET_FEATURE(n_, str_, Name_, desc_) \ + static bool Feature##Name_(PSLockRef) { \ + MOZ_ASSERT(sInstance); \ + return ProfilerFeature::Has##Name_(sInstance->mFeatures); \ + } + + PROFILER_FOR_EACH_FEATURE(PS_GET_FEATURE) + +#undef PS_GET_FEATURE + + static uint32_t JSFlags(PSLockRef aLock) { + uint32_t Flags = 0; + Flags |= + FeatureJS(aLock) ? uint32_t(JSInstrumentationFlags::StackSampling) : 0; + Flags |= FeatureJSTracer(aLock) + ? uint32_t(JSInstrumentationFlags::TraceLogging) + : 0; + Flags |= FeatureJSAllocations(aLock) + ? uint32_t(JSInstrumentationFlags::Allocations) + : 0; + return Flags; + } + + PS_GET(const Vector<std::string>&, Filters) + + // Not using PS_GET, because only the "Controlled" interface of + // `mProfileBufferChunkManager` should be exposed here. + static ProfileBufferControlledChunkManager& ControlledChunkManager( + PSLockRef) { + MOZ_ASSERT(sInstance); + return sInstance->mProfileBufferChunkManager; + } + + static void FulfillChunkRequests(PSLockRef) { + MOZ_ASSERT(sInstance); + sInstance->mProfileBufferChunkManager.FulfillChunkRequests(); + } + + static ProfileBuffer& Buffer(PSLockRef) { + MOZ_ASSERT(sInstance); + return sInstance->mProfileBuffer; + } + + static const Vector<LiveProfiledThreadData>& LiveProfiledThreads(PSLockRef) { + MOZ_ASSERT(sInstance); + return sInstance->mLiveProfiledThreads; + } + + // Returns an array containing (RegisteredThread*, ProfiledThreadData*) pairs + // for all threads that should be included in a profile, both for threads + // that are still registered, and for threads that have been unregistered but + // still have data in the buffer. 
+ // For threads that have already been unregistered, the RegisteredThread + // pointer will be null. + // The returned array is sorted by thread register time. + // Do not hold on to the return value across thread registration or profiler + // restarts. + static Vector<std::pair<RegisteredThread*, ProfiledThreadData*>> + ProfiledThreads(PSLockRef) { + MOZ_ASSERT(sInstance); + Vector<std::pair<RegisteredThread*, ProfiledThreadData*>> array; + MOZ_RELEASE_ASSERT( + array.initCapacity(sInstance->mLiveProfiledThreads.length() + + sInstance->mDeadProfiledThreads.length())); + for (auto& t : sInstance->mLiveProfiledThreads) { + MOZ_RELEASE_ASSERT(array.append( + std::make_pair(t.mRegisteredThread, t.mProfiledThreadData.get()))); + } + for (auto& t : sInstance->mDeadProfiledThreads) { + MOZ_RELEASE_ASSERT( + array.append(std::make_pair((RegisteredThread*)nullptr, t.get()))); + } + + std::sort(array.begin(), array.end(), + [](const std::pair<RegisteredThread*, ProfiledThreadData*>& a, + const std::pair<RegisteredThread*, ProfiledThreadData*>& b) { + return a.second->Info()->RegisterTime() < + b.second->Info()->RegisterTime(); + }); + return array; + } + + static Vector<RefPtr<PageInformation>> ProfiledPages(PSLockRef aLock) { + MOZ_ASSERT(sInstance); + Vector<RefPtr<PageInformation>> array; + for (auto& d : CorePS::RegisteredPages(aLock)) { + MOZ_RELEASE_ASSERT(array.append(d)); + } + for (auto& d : sInstance->mDeadProfiledPages) { + MOZ_RELEASE_ASSERT(array.append(d)); + } + // We don't need to sort the pages like threads since we won't show them + // as a list. + return array; + } + + // Do a linear search through mLiveProfiledThreads to find the + // ProfiledThreadData object for a RegisteredThread. + static ProfiledThreadData* GetProfiledThreadData( + PSLockRef, RegisteredThread* aRegisteredThread) { + MOZ_ASSERT(sInstance); + for (const LiveProfiledThreadData& thread : + sInstance->mLiveProfiledThreads) { + if (thread.mRegisteredThread == aRegisteredThread) { + return thread.mProfiledThreadData.get(); + } + } + return nullptr; + } + + static ProfiledThreadData* AddLiveProfiledThread( + PSLockRef, RegisteredThread* aRegisteredThread, + UniquePtr<ProfiledThreadData>&& aProfiledThreadData) { + MOZ_ASSERT(sInstance); + MOZ_RELEASE_ASSERT( + sInstance->mLiveProfiledThreads.append(LiveProfiledThreadData{ + aRegisteredThread, std::move(aProfiledThreadData)})); + + // Return a weak pointer to the ProfiledThreadData object. + return sInstance->mLiveProfiledThreads.back().mProfiledThreadData.get(); + } + + static void UnregisterThread(PSLockRef aLockRef, + RegisteredThread* aRegisteredThread) { + MOZ_ASSERT(sInstance); + + DiscardExpiredDeadProfiledThreads(aLockRef); + + // Find the right entry in the mLiveProfiledThreads array and remove the + // element, moving the ProfiledThreadData object for the thread into the + // mDeadProfiledThreads array. + // The thread's RegisteredThread object gets destroyed here. 
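UnregisterThread() below moves the thread's profiled data into mDeadProfiledThreads together with the buffer position at unregistration time, and DiscardExpiredDeadProfiledThreads() drops an entry once the buffer's range start has moved past that position, i.e. once none of its samples can still be in the buffer. A minimal standalone sketch of that expiry rule, with illustrative types:

#include <algorithm>
#include <cstdint>
#include <vector>

struct DeadThreadRecord {
  uint64_t bufferPositionWhenUnregistered;
  // ... owned profile data would live here ...
};

void DiscardExpired(std::vector<DeadThreadRecord>& aDead,
                    uint64_t aBufferRangeStart) {
  aDead.erase(std::remove_if(aDead.begin(), aDead.end(),
                             [aBufferRangeStart](const DeadThreadRecord& aT) {
                               return aT.bufferPositionWhenUnregistered <
                                      aBufferRangeStart;
                             }),
              aDead.end());
}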
+ for (size_t i = 0; i < sInstance->mLiveProfiledThreads.length(); i++) { + LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i]; + if (thread.mRegisteredThread == aRegisteredThread) { + thread.mProfiledThreadData->NotifyUnregistered( + sInstance->mProfileBuffer.BufferRangeEnd()); + MOZ_RELEASE_ASSERT(sInstance->mDeadProfiledThreads.append( + std::move(thread.mProfiledThreadData))); + sInstance->mLiveProfiledThreads.erase( + &sInstance->mLiveProfiledThreads[i]); + return; + } + } + } + + PS_GET_AND_SET(bool, IsPaused) + + // True if sampling is paused (though generic `SetIsPaused()` or specific + // `SetIsSamplingPaused()`). + static bool IsSamplingPaused(PSLockRef lock) { + MOZ_ASSERT(sInstance); + return IsPaused(lock) || sInstance->mIsSamplingPaused; + } + + static void SetIsSamplingPaused(PSLockRef, bool aIsSamplingPaused) { + MOZ_ASSERT(sInstance); + sInstance->mIsSamplingPaused = aIsSamplingPaused; + } + +#if defined(GP_OS_linux) || defined(GP_OS_freebsd) + PS_GET_AND_SET(bool, WasSamplingPaused) +#endif + + static void DiscardExpiredDeadProfiledThreads(PSLockRef) { + MOZ_ASSERT(sInstance); + uint64_t bufferRangeStart = sInstance->mProfileBuffer.BufferRangeStart(); + // Discard any dead threads that were unregistered before bufferRangeStart. + sInstance->mDeadProfiledThreads.eraseIf( + [bufferRangeStart]( + const UniquePtr<ProfiledThreadData>& aProfiledThreadData) { + Maybe<uint64_t> bufferPosition = + aProfiledThreadData->BufferPositionWhenUnregistered(); + MOZ_RELEASE_ASSERT(bufferPosition, + "should have unregistered this thread"); + return *bufferPosition < bufferRangeStart; + }); + } + + static void UnregisterPage(PSLockRef aLock, + uint64_t aRegisteredInnerWindowID) { + MOZ_ASSERT(sInstance); + auto& registeredPages = CorePS::RegisteredPages(aLock); + for (size_t i = 0; i < registeredPages.length(); i++) { + RefPtr<PageInformation>& page = registeredPages[i]; + if (page->InnerWindowID() == aRegisteredInnerWindowID) { + page->NotifyUnregistered(sInstance->mProfileBuffer.BufferRangeEnd()); + MOZ_RELEASE_ASSERT( + sInstance->mDeadProfiledPages.append(std::move(page))); + registeredPages.erase(®isteredPages[i--]); + } + } + } + + static void DiscardExpiredPages(PSLockRef) { + MOZ_ASSERT(sInstance); + uint64_t bufferRangeStart = sInstance->mProfileBuffer.BufferRangeStart(); + // Discard any dead pages that were unregistered before + // bufferRangeStart. + sInstance->mDeadProfiledPages.eraseIf( + [bufferRangeStart](const RefPtr<PageInformation>& aProfiledPage) { + Maybe<uint64_t> bufferPosition = + aProfiledPage->BufferPositionWhenUnregistered(); + MOZ_RELEASE_ASSERT(bufferPosition, + "should have unregistered this page"); + return *bufferPosition < bufferRangeStart; + }); + } + + static void ClearUnregisteredPages(PSLockRef) { + MOZ_ASSERT(sInstance); + sInstance->mDeadProfiledPages.clear(); + } + + static void ClearExpiredExitProfiles(PSLockRef) { + MOZ_ASSERT(sInstance); + uint64_t bufferRangeStart = sInstance->mProfileBuffer.BufferRangeStart(); + // Discard exit profiles that were gathered before our buffer RangeStart. + // If we have started to overwrite our data from when the Base profile was + // added, we should get rid of that Base profile because it's now older than + // our oldest Gecko profile data. + // + // When adding: (In practice the starting buffer should be empty) + // v Start == End + // | <-- Buffer range, initially empty. 
+ // ^ mGeckoIndexWhenBaseProfileAdded < Start FALSE -> keep it + // + // Later, still in range: + // v Start v End + // |=========| <-- Buffer range growing. + // ^ mGeckoIndexWhenBaseProfileAdded < Start FALSE -> keep it + // + // Even later, now out of range: + // v Start v End + // |============| <-- Buffer range full and sliding. + // ^ mGeckoIndexWhenBaseProfileAdded < Start TRUE! -> Discard it + if (sInstance->mBaseProfileThreads && + sInstance->mGeckoIndexWhenBaseProfileAdded + .ConvertToProfileBufferIndex() < + CorePS::CoreBuffer().GetState().mRangeStart) { + DEBUG_LOG("ClearExpiredExitProfiles() - Discarding base profile %p", + sInstance->mBaseProfileThreads.get()); + sInstance->mBaseProfileThreads.reset(); + } + sInstance->mExitProfiles.eraseIf( + [bufferRangeStart](const ExitProfile& aExitProfile) { + return aExitProfile.mBufferPositionAtGatherTime < bufferRangeStart; + }); + } + + static void AddBaseProfileThreads(PSLockRef aLock, + UniquePtr<char[]> aBaseProfileThreads) { + MOZ_ASSERT(sInstance); + DEBUG_LOG("AddBaseProfileThreads(%p)", aBaseProfileThreads.get()); + sInstance->mBaseProfileThreads = std::move(aBaseProfileThreads); + sInstance->mGeckoIndexWhenBaseProfileAdded = + ProfileBufferBlockIndex::CreateFromProfileBufferIndex( + CorePS::CoreBuffer().GetState().mRangeEnd); + } + + static UniquePtr<char[]> MoveBaseProfileThreads(PSLockRef aLock) { + MOZ_ASSERT(sInstance); + + ClearExpiredExitProfiles(aLock); + + DEBUG_LOG("MoveBaseProfileThreads() - Consuming base profile %p", + sInstance->mBaseProfileThreads.get()); + return std::move(sInstance->mBaseProfileThreads); + } + + static void AddExitProfile(PSLockRef aLock, const nsCString& aExitProfile) { + MOZ_ASSERT(sInstance); + + ClearExpiredExitProfiles(aLock); + + MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append( + ExitProfile{aExitProfile, sInstance->mProfileBuffer.BufferRangeEnd()})); + } + + static Vector<nsCString> MoveExitProfiles(PSLockRef aLock) { + MOZ_ASSERT(sInstance); + + ClearExpiredExitProfiles(aLock); + + Vector<nsCString> profiles; + MOZ_RELEASE_ASSERT( + profiles.initCapacity(sInstance->mExitProfiles.length())); + for (auto& profile : sInstance->mExitProfiles) { + MOZ_RELEASE_ASSERT(profiles.append(std::move(profile.mJSON))); + } + sInstance->mExitProfiles.clear(); + return profiles; + } + + private: + // The singleton instance. + static ActivePS* sInstance; + + // We need to track activity generations. If we didn't we could have the + // following scenario. + // + // - profiler_stop() locks gPSMutex, de-instantiates ActivePS, unlocks + // gPSMutex, deletes the SamplerThread (which does a join). + // + // - profiler_start() runs on a different thread, locks gPSMutex, + // re-instantiates ActivePS, unlocks gPSMutex -- all before the join + // completes. + // + // - SamplerThread::Run() locks gPSMutex, sees that ActivePS is instantiated, + // and continues as if the start/stop pair didn't occur. Also + // profiler_stop() is stuck, unable to finish. + // + // By checking ActivePS *and* the generation, we can avoid this scenario. + // sNextGeneration is used to track the next generation number; it is static + // because it must persist across different ActivePS instantiations. + const uint32_t mGeneration; + static uint32_t sNextGeneration; + + // The maximum number of entries in mProfileBuffer. + const PowerOfTwo32 mCapacity; + + // The maximum duration of entries in mProfileBuffer, in seconds. + const Maybe<double> mDuration; + + // The interval between samples, measured in milliseconds. 
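A minimal standalone sketch of the generation check described above (illustrative names; assume Start() and Stop() are serialized by a lock, as the real code is by gPSMutex): the sampler captures the generation it was started with and exits as soon as a stop, or a stop/start pair, has changed it.

#include <atomic>
#include <cstdint>

static std::atomic<uint32_t> gCurrentGeneration{0};  // 0 means "inactive"
static uint32_t gNextGeneration = 1;

uint32_t Start() {  // analogous to profiler_start()
  uint32_t generation = gNextGeneration++;
  gCurrentGeneration.store(generation);
  return generation;  // handed to the newly created sampler thread
}

void Stop() {  // analogous to profiler_stop()
  gCurrentGeneration.store(0);
}

void SamplerLoop(uint32_t aMyGeneration) {
  while (gCurrentGeneration.load() == aMyGeneration) {
    // ... take one periodic sample, then sleep ...
  }
  // Either the profiler was stopped, or it was stopped and restarted with a
  // newer generation; either way this thread exits so the join in Stop()'s
  // caller can complete.
}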
+ const double mInterval; + + // The profile features that are enabled. + const uint32_t mFeatures; + + // Substrings of names of threads we want to profile. + Vector<std::string> mFilters; + + // Browsing Context ID of the active active browser screen's active tab. + // It's being used to determine the profiled tab. It's "0" if we failed to + // get the ID. + const uint64_t mActiveBrowsingContextID; + + // The chunk manager used by `mProfileBuffer` below. + ProfileBufferChunkManagerWithLocalLimit mProfileBufferChunkManager; + + // The buffer into which all samples are recorded. + ProfileBuffer mProfileBuffer; + + // ProfiledThreadData objects for any threads that were profiled at any point + // during this run of the profiler: + // - mLiveProfiledThreads contains all threads that are still registered, and + // - mDeadProfiledThreads contains all threads that have already been + // unregistered but for which there is still data in the profile buffer. + Vector<LiveProfiledThreadData> mLiveProfiledThreads; + Vector<UniquePtr<ProfiledThreadData>> mDeadProfiledThreads; + + // Info on all the dead pages. + // Registered pages are being moved to this array after unregistration. + // We are keeping them in case we need them in the profile data. + // We are removing them when we ensure that we won't need them anymore. + Vector<RefPtr<PageInformation>> mDeadProfiledPages; + + // The current sampler thread. This class is not responsible for destroying + // the SamplerThread object; the Destroy() method returns it so the caller + // can destroy it. + SamplerThread* const mSamplerThread; + + // The interposer that records main thread I/O. + RefPtr<ProfilerIOInterposeObserver> mInterposeObserver; + + // Is the profiler fully paused? + bool mIsPaused; + + // Is the profiler periodic sampling paused? + bool mIsSamplingPaused; + +#if defined(GP_OS_linux) || defined(GP_OS_freebsd) + // Used to record whether the sampler was paused just before forking. False + // at all times except just before/after forking. + bool mWasSamplingPaused; +#endif + + // Optional startup profile thread array from BaseProfiler. + UniquePtr<char[]> mBaseProfileThreads; + ProfileBufferBlockIndex mGeckoIndexWhenBaseProfileAdded; + + struct ExitProfile { + nsCString mJSON; + uint64_t mBufferPositionAtGatherTime; + }; + Vector<ExitProfile> mExitProfiles; +}; + +ActivePS* ActivePS::sInstance = nullptr; +uint32_t ActivePS::sNextGeneration = 0; + +#undef PS_GET +#undef PS_GET_LOCKLESS +#undef PS_GET_AND_SET + +// The mutex that guards accesses to CorePS and ActivePS. +static PSMutex gPSMutex; + +Atomic<uint32_t, MemoryOrdering::Relaxed> RacyFeatures::sActiveAndFeatures(0); + +// Each live thread has a RegisteredThread, and we store a reference to it in +// TLS. This class encapsulates that TLS, and also handles the associated +// profiling stack used by AutoProfilerLabel. +class TLSRegisteredThread { + public: + // This should only be called once before any other access. + // In this case it's called from `profiler_init()` on the main thread, before + // the main thread registers itself. 
+ static void Init() { + MOZ_ASSERT(sState == State::Uninitialized, "Already initialized"); + AutoProfilerLabel::ProfilingStackOwnerTLS::Init(); + MOZ_ASSERT( + AutoProfilerLabel::ProfilingStackOwnerTLS::sState != + AutoProfilerLabel::ProfilingStackOwnerTLS::State::Uninitialized, + "Unexpected ProfilingStackOwnerTLS::sState after " + "ProfilingStackOwnerTLS::Init()"); + sState = + (AutoProfilerLabel::ProfilingStackOwnerTLS::sState == + AutoProfilerLabel::ProfilingStackOwnerTLS::State::Initialized && + sRegisteredThread.init()) + ? State::Initialized + : State::Unavailable; + } + + static bool IsTLSInited() { + MOZ_ASSERT(sState != State::Uninitialized, + "TLSRegisteredThread should only be accessed after Init()"); + return sState == State::Initialized; + } + + // Get the entire RegisteredThread. Accesses are guarded by gPSMutex. + static class RegisteredThread* RegisteredThread(PSLockRef) { + if (!IsTLSInited()) { + return nullptr; + } + return sRegisteredThread.get(); + } + + // Get only the RacyRegisteredThread. Accesses are not guarded by gPSMutex. + static class RacyRegisteredThread* RacyRegisteredThread() { + if (!IsTLSInited()) { + return nullptr; + } + class RegisteredThread* registeredThread = sRegisteredThread.get(); + return registeredThread ? ®isteredThread->RacyRegisteredThread() + : nullptr; + } + + // Get only the ProfilingStack. Accesses are not guarded by gPSMutex. + // RacyRegisteredThread() can also be used to get the ProfilingStack, but that + // is marginally slower because it requires an extra pointer indirection. + static ProfilingStack* Stack() { + if (!IsTLSInited()) { + return nullptr; + } + ProfilingStackOwner* profilingStackOwner = + AutoProfilerLabel::ProfilingStackOwnerTLS::Get(); + if (!profilingStackOwner) { + return nullptr; + } + return &profilingStackOwner->ProfilingStack(); + } + + static void SetRegisteredThreadAndAutoProfilerLabelProfilingStack( + PSLockRef, class RegisteredThread* aRegisteredThread) { + if (!IsTLSInited()) { + return; + } + MOZ_RELEASE_ASSERT( + aRegisteredThread, + "Use ResetRegisteredThread() instead of SetRegisteredThread(nullptr)"); + sRegisteredThread.set(aRegisteredThread); + ProfilingStackOwner& profilingStackOwner = + aRegisteredThread->RacyRegisteredThread().ProfilingStackOwner(); + profilingStackOwner.AddRef(); + AutoProfilerLabel::ProfilingStackOwnerTLS::Set(&profilingStackOwner); + } + + // Only reset the registered thread. The AutoProfilerLabel's ProfilingStack + // is kept, because the thread may not have unregistered itself yet, so it may + // still push/pop labels even after the profiler has shut down. + static void ResetRegisteredThread(PSLockRef) { + if (!IsTLSInited()) { + return; + } + sRegisteredThread.set(nullptr); + } + + // Reset the AutoProfilerLabels' ProfilingStack, because the thread is + // unregistering itself. + static void ResetAutoProfilerLabelProfilingStack(PSLockRef) { + if (!IsTLSInited()) { + return; + } + MOZ_RELEASE_ASSERT( + AutoProfilerLabel::ProfilingStackOwnerTLS::Get(), + "ResetAutoProfilerLabelProfilingStack should only be called once"); + AutoProfilerLabel::ProfilingStackOwnerTLS::Get()->Release(); + AutoProfilerLabel::ProfilingStackOwnerTLS::Set(nullptr); + } + + private: + // Only written once from `profiler_init` calling + // `TLSRegisteredThread::Init()`; all reads should only happen after `Init()`, + // so there is no need to make it atomic. 
+ enum class State { Uninitialized = 0, Initialized, Unavailable }; + static State sState; + + // This is a non-owning reference to the RegisteredThread; + // CorePS::mRegisteredThreads is the owning reference. On thread + // deregistration, this reference is cleared and the RegisteredThread is + // destroyed. + static MOZ_THREAD_LOCAL(class RegisteredThread*) sRegisteredThread; +}; + +// Zero-initialized to State::Uninitialized. +/* static */ +TLSRegisteredThread::State TLSRegisteredThread::sState; + +/* static */ +MOZ_THREAD_LOCAL(RegisteredThread*) TLSRegisteredThread::sRegisteredThread; + +// Only written once from `profiler_init` (through `TLSRegisteredThread::Init()` +// and `AutoProfilerLabel::ProfilingStackOwnerTLS::Init()`); all reads should +// only happen after `Init()`, so there is no need to make it atomic. +// Zero-initialized to State::Uninitialized. +/* static */ +AutoProfilerLabel::ProfilingStackOwnerTLS::State + AutoProfilerLabel::ProfilingStackOwnerTLS::sState; + +// Although you can access a thread's ProfilingStack via +// TLSRegisteredThread::sRegisteredThread, we also have a second TLS pointer +// directly to the ProfilingStack. Here's why. +// +// - We need to be able to push to and pop from the ProfilingStack in +// AutoProfilerLabel. +// +// - The class functions are hot and must be defined in GeckoProfiler.h so they +// can be inlined. +// +// - We don't want to expose TLSRegisteredThread (and RegisteredThread) in +// GeckoProfiler.h. +// +// This second pointer isn't ideal, but does provide a way to satisfy those +// constraints. TLSRegisteredThread is responsible for updating it. +// +// The (Racy)RegisteredThread and AutoProfilerLabel::ProfilingStackOwnerTLS +// co-own the thread's ProfilingStack, so whichever is reset second, is +// responsible for destroying the ProfilingStack; Because MOZ_THREAD_LOCAL +// doesn't support RefPtr, AddRef&Release are done explicitly in +// TLSRegisteredThread. +/* static */ +MOZ_THREAD_LOCAL(ProfilingStackOwner*) +AutoProfilerLabel::ProfilingStackOwnerTLS::sProfilingStackOwnerTLS; + +/* static */ +void AutoProfilerLabel::ProfilingStackOwnerTLS::Init() { + MOZ_ASSERT(sState == State::Uninitialized, "Already initialized"); + sState = + sProfilingStackOwnerTLS.init() ? State::Initialized : State::Unavailable; +} + +void ProfilingStackOwner::DumpStackAndCrash() const { + fprintf(stderr, + "ProfilingStackOwner::DumpStackAndCrash() thread id: %d, size: %u\n", + profiler_current_thread_id(), unsigned(mProfilingStack.stackSize())); + js::ProfilingStackFrame* allFrames = mProfilingStack.frames; + for (uint32_t i = 0; i < mProfilingStack.stackSize(); i++) { + js::ProfilingStackFrame& frame = allFrames[i]; + if (frame.isLabelFrame()) { + fprintf(stderr, "%u: label frame, sp=%p, label='%s' (%s)\n", unsigned(i), + frame.stackAddress(), frame.label(), + frame.dynamicString() ? frame.dynamicString() : "-"); + } else { + fprintf(stderr, "%u: non-label frame\n", unsigned(i)); + } + } + + MOZ_CRASH("Non-empty stack!"); +} + +// The name of the main thread. +static const char* const kMainThreadName = "GeckoMain"; + +//////////////////////////////////////////////////////////////////////// +// BEGIN sampling/unwinding code + +// The registers used for stack unwinding and a few other sampling purposes. +// The ctor does nothing; users are responsible for filling in the fields. 
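The AutoProfilerLabel/ProfilingStack arrangement described above boils down to an RAII object pushing a label frame onto a thread-local stack that the sampler can read. A standalone sketch of that idea (illustrative; the real ProfilingStack uses a fixed-capacity frame array rather than std::vector so it stays safely readable while the thread is suspended):

#include <vector>

struct LabelFrame {
  const char* label;
  const void* sp;  // rough stack address, used to interleave with other frames
};

// One label stack per thread; the real code reaches it through TLS as well.
static thread_local std::vector<LabelFrame> tLabelStack;

class AutoLabel {
 public:
  explicit AutoLabel(const char* aLabel) {
    tLabelStack.push_back(LabelFrame{aLabel, &aLabel});
  }
  ~AutoLabel() { tLabelStack.pop_back(); }
  AutoLabel(const AutoLabel&) = delete;
};

void DoWork() {
  AutoLabel label("DoWork");
  // While this frame is alive, a sampler inspecting this thread's label stack
  // sees "DoWork" as the innermost label.
}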
+class Registers { + public: + Registers() : mPC{nullptr}, mSP{nullptr}, mFP{nullptr}, mLR{nullptr} {} + +#if defined(HAVE_NATIVE_UNWIND) + // Fills in mPC, mSP, mFP, mLR, and mContext for a synchronous sample. + void SyncPopulate(); +#endif + + void Clear() { memset(this, 0, sizeof(*this)); } + + // These fields are filled in by + // Sampler::SuspendAndSampleAndResumeThread() for periodic and backtrace + // samples, and by SyncPopulate() for synchronous samples. + Address mPC; // Instruction pointer. + Address mSP; // Stack pointer. + Address mFP; // Frame pointer. + Address mLR; // ARM link register. +#if defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd) + // This contains all the registers, which means it duplicates the four fields + // above. This is ok. + ucontext_t* mContext; // The context from the signal handler. +#endif +}; + +// Setting MAX_NATIVE_FRAMES too high risks the unwinder wasting a lot of time +// looping on corrupted stacks. +static const size_t MAX_NATIVE_FRAMES = 1024; + +struct NativeStack { + void* mPCs[MAX_NATIVE_FRAMES]; + void* mSPs[MAX_NATIVE_FRAMES]; + size_t mCount; // Number of frames filled. + + NativeStack() : mPCs(), mSPs(), mCount(0) {} +}; + +Atomic<bool> WALKING_JS_STACK(false); + +struct AutoWalkJSStack { + bool walkAllowed; + + AutoWalkJSStack() : walkAllowed(false) { + walkAllowed = WALKING_JS_STACK.compareExchange(false, true); + } + + ~AutoWalkJSStack() { + if (walkAllowed) { + WALKING_JS_STACK = false; + } + } +}; + +// Merges the profiling stack, native stack, and JS stack, outputting the +// details to aCollector. +static void MergeStacks(uint32_t aFeatures, bool aIsSynchronous, + const RegisteredThread& aRegisteredThread, + const Registers& aRegs, const NativeStack& aNativeStack, + ProfilerStackCollector& aCollector, + JsFrameBuffer aJsFrames) { + // WARNING: this function runs within the profiler's "critical section". + // WARNING: this function might be called while the profiler is inactive, and + // cannot rely on ActivePS. + + const ProfilingStack& profilingStack = + aRegisteredThread.RacyRegisteredThread().ProfilingStack(); + const js::ProfilingStackFrame* profilingStackFrames = profilingStack.frames; + uint32_t profilingStackFrameCount = profilingStack.stackSize(); + JSContext* context = aRegisteredThread.GetJSContext(); + + // Make a copy of the JS stack into a JSFrame array. This is necessary since, + // like the native stack, the JS stack is iterated youngest-to-oldest and we + // need to iterate oldest-to-youngest when adding frames to aInfo. + + // Non-periodic sampling passes Nothing() as the buffer write position to + // ProfilingFrameIterator to avoid incorrectly resetting the buffer position + // of sampled JIT frames inside the JS engine. + Maybe<uint64_t> samplePosInBuffer; + if (!aIsSynchronous) { + // aCollector.SamplePositionInBuffer() will return Nothing() when + // profiler_suspend_and_sample_thread is called from the background hang + // reporter. + samplePosInBuffer = aCollector.SamplePositionInBuffer(); + } + uint32_t jsCount = 0; + + // Only walk jit stack if profiling frame iterator is turned on. 
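AutoWalkJSStack above is a non-blocking mutual-exclusion guard: at most one thread at a time may walk a JS stack, and any thread that loses the race simply skips that work instead of waiting. A standalone sketch of the same compare-exchange idiom with standard atomics (illustrative names):

#include <atomic>

static std::atomic<bool> gBusy{false};

class AutoTrySection {
 public:
  AutoTrySection() {
    bool expected = false;
    mEntered = gBusy.compare_exchange_strong(expected, true);
  }
  ~AutoTrySection() {
    if (mEntered) {
      gBusy.store(false);
    }
  }
  explicit operator bool() const { return mEntered; }

 private:
  bool mEntered = false;
};

// Usage:
//   if (AutoTrySection section{}; section) { /* walk the JS stack */ }
//   // otherwise: skip the walk, don't block.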
+ if (context && JS::IsProfilingEnabledForContext(context)) { + AutoWalkJSStack autoWalkJSStack; + + if (autoWalkJSStack.walkAllowed) { + JS::ProfilingFrameIterator::RegisterState registerState; + registerState.pc = aRegs.mPC; + registerState.sp = aRegs.mSP; + registerState.lr = aRegs.mLR; + registerState.fp = aRegs.mFP; + + JS::ProfilingFrameIterator jsIter(context, registerState, + samplePosInBuffer); + for (; jsCount < MAX_JS_FRAMES && !jsIter.done(); ++jsIter) { + if (aIsSynchronous || jsIter.isWasm()) { + uint32_t extracted = + jsIter.extractStack(aJsFrames, jsCount, MAX_JS_FRAMES); + jsCount += extracted; + if (jsCount == MAX_JS_FRAMES) { + break; + } + } else { + Maybe<JS::ProfilingFrameIterator::Frame> frame = + jsIter.getPhysicalFrameWithoutLabel(); + if (frame.isSome()) { + aJsFrames[jsCount++] = frame.value(); + } + } + } + } + } + + // While the profiling stack array is ordered oldest-to-youngest, the JS and + // native arrays are ordered youngest-to-oldest. We must add frames to aInfo + // oldest-to-youngest. Thus, iterate over the profiling stack forwards and JS + // and native arrays backwards. Note: this means the terminating condition + // jsIndex and nativeIndex is being < 0. + uint32_t profilingStackIndex = 0; + int32_t jsIndex = jsCount - 1; + int32_t nativeIndex = aNativeStack.mCount - 1; + + uint8_t* lastLabelFrameStackAddr = nullptr; + uint8_t* jitEndStackAddr = nullptr; + + // Iterate as long as there is at least one frame remaining. + while (profilingStackIndex != profilingStackFrameCount || jsIndex >= 0 || + nativeIndex >= 0) { + // There are 1 to 3 frames available. Find and add the oldest. + uint8_t* profilingStackAddr = nullptr; + uint8_t* jsStackAddr = nullptr; + uint8_t* nativeStackAddr = nullptr; + uint8_t* jsActivationAddr = nullptr; + + if (profilingStackIndex != profilingStackFrameCount) { + const js::ProfilingStackFrame& profilingStackFrame = + profilingStackFrames[profilingStackIndex]; + + if (profilingStackFrame.isLabelFrame() || + profilingStackFrame.isSpMarkerFrame()) { + lastLabelFrameStackAddr = (uint8_t*)profilingStackFrame.stackAddress(); + } + + // Skip any JS_OSR frames. Such frames are used when the JS interpreter + // enters a jit frame on a loop edge (via on-stack-replacement, or OSR). + // To avoid both the profiling stack frame and jit frame being recorded + // (and showing up twice), the interpreter marks the interpreter + // profiling stack frame as JS_OSR to ensure that it doesn't get counted. + if (profilingStackFrame.isOSRFrame()) { + profilingStackIndex++; + continue; + } + + MOZ_ASSERT(lastLabelFrameStackAddr); + profilingStackAddr = lastLabelFrameStackAddr; + } + + if (jsIndex >= 0) { + jsStackAddr = (uint8_t*)aJsFrames[jsIndex].stackAddress; + jsActivationAddr = (uint8_t*)aJsFrames[jsIndex].activation; + } + + if (nativeIndex >= 0) { + nativeStackAddr = (uint8_t*)aNativeStack.mSPs[nativeIndex]; + } + + // If there's a native stack frame which has the same SP as a profiling + // stack frame, pretend we didn't see the native stack frame. Ditto for a + // native stack frame which has the same SP as a JS stack frame. In effect + // this means profiling stack frames or JS frames trump conflicting native + // frames. + if (nativeStackAddr && (profilingStackAddr == nativeStackAddr || + jsStackAddr == nativeStackAddr)) { + nativeStackAddr = nullptr; + nativeIndex--; + MOZ_ASSERT(profilingStackAddr || jsStackAddr); + } + + // Sanity checks. 
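The loop above merges up to three per-thread stacks that are ordered by stack address; because stacks grow downwards, a higher address means an older frame, so repeatedly emitting the remaining frame with the highest address produces the oldest-to-youngest order the collector needs. A simplified standalone sketch of that merge for just two stacks, assuming real (non-zero) stack addresses and ignoring the profiling-stack-wins tie-breaking rules:

#include <cstdint>
#include <vector>

struct Frame {
  uintptr_t stackAddress;
  const char* label;
};

// aA and aB are each ordered youngest-to-oldest, like the JS and native
// arrays; the result is ordered oldest-to-youngest.
std::vector<Frame> MergeOldestFirst(const std::vector<Frame>& aA,
                                    const std::vector<Frame>& aB) {
  std::vector<Frame> merged;
  int aIndex = int(aA.size()) - 1;  // oldest remaining frame in aA
  int bIndex = int(aB.size()) - 1;  // oldest remaining frame in aB
  while (aIndex >= 0 || bIndex >= 0) {
    uintptr_t aAddr = aIndex >= 0 ? aA[aIndex].stackAddress : 0;
    uintptr_t bAddr = bIndex >= 0 ? aB[bIndex].stackAddress : 0;
    if (aAddr >= bAddr) {
      merged.push_back(aA[aIndex--]);  // aA's oldest frame is the older one
    } else {
      merged.push_back(aB[bIndex--]);
    }
  }
  return merged;
}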
+    MOZ_ASSERT_IF(profilingStackAddr,
+                  profilingStackAddr != jsStackAddr &&
+                      profilingStackAddr != nativeStackAddr);
+    MOZ_ASSERT_IF(jsStackAddr, jsStackAddr != profilingStackAddr &&
+                                   jsStackAddr != nativeStackAddr);
+    MOZ_ASSERT_IF(nativeStackAddr, nativeStackAddr != profilingStackAddr &&
+                                       nativeStackAddr != jsStackAddr);
+
+    // Check to see if the profiling stack frame is top-most.
+    if (profilingStackAddr > jsStackAddr &&
+        profilingStackAddr > nativeStackAddr) {
+      MOZ_ASSERT(profilingStackIndex < profilingStackFrameCount);
+      const js::ProfilingStackFrame& profilingStackFrame =
+          profilingStackFrames[profilingStackIndex];
+
+      // Sp marker frames are just annotations and should not be recorded in
+      // the profile.
+      if (!profilingStackFrame.isSpMarkerFrame()) {
+        // The JIT only allows the top-most frame to have a nullptr pc.
+        MOZ_ASSERT_IF(
+            profilingStackFrame.isJsFrame() && profilingStackFrame.script() &&
+                !profilingStackFrame.pc(),
+            &profilingStackFrame ==
+                &profilingStack.frames[profilingStack.stackSize() - 1]);
+        aCollector.CollectProfilingStackFrame(profilingStackFrame);
+      }
+      profilingStackIndex++;
+      continue;
+    }
+
+    // Check to see if the JS jit stack frame is top-most.
+    if (jsStackAddr > nativeStackAddr) {
+      MOZ_ASSERT(jsIndex >= 0);
+      const JS::ProfilingFrameIterator::Frame& jsFrame = aJsFrames[jsIndex];
+      jitEndStackAddr = (uint8_t*)jsFrame.endStackAddress;
+      // Stringifying non-wasm JIT frames is delayed until streaming time. To
+      // re-lookup the entry in the JitcodeGlobalTable, we need to store the
+      // JIT code address (OptInfoAddr) in the circular buffer.
+      //
+      // Note that we cannot do this when we are synchronously sampling the
+      // current thread; that is, when called from profiler_get_backtrace. The
+      // captured backtrace is usually externally stored for an indeterminate
+      // amount of time, such as in nsRefreshDriver. Problematically, the
+      // stored backtrace may be alive across a GC during which the profiler
+      // itself is disabled. In that case, the JS engine is free to discard its
+      // JIT code. This means that if we inserted such OptInfoAddr entries into
+      // the buffer, nsRefreshDriver would now be holding on to a backtrace
+      // with stale JIT code return addresses.
+      if (aIsSynchronous ||
+          jsFrame.kind == JS::ProfilingFrameIterator::Frame_Wasm) {
+        aCollector.CollectWasmFrame(jsFrame.label);
+      } else if (jsFrame.kind ==
+                 JS::ProfilingFrameIterator::Frame_BaselineInterpreter) {
+        // Materialize a ProfilingStackFrame similar to the C++ Interpreter. We
+        // also set the IS_BLINTERP_FRAME flag to differentiate it from C++
+        // interpreter frames.
+        JSScript* script = jsFrame.interpreterScript;
+        jsbytecode* pc = jsFrame.interpreterPC();
+        js::ProfilingStackFrame stackFrame;
+        constexpr uint32_t ExtraFlags =
+            uint32_t(js::ProfilingStackFrame::Flags::IS_BLINTERP_FRAME);
+        stackFrame.initJsFrame<JS::ProfilingCategoryPair::JS_BaselineInterpret,
+                               ExtraFlags>("", jsFrame.label, script, pc,
+                                           jsFrame.realmID);
+        aCollector.CollectProfilingStackFrame(stackFrame);
+      } else {
+        MOZ_ASSERT(jsFrame.kind == JS::ProfilingFrameIterator::Frame_Ion ||
+                   jsFrame.kind == JS::ProfilingFrameIterator::Frame_Baseline);
+        aCollector.CollectJitReturnAddr(jsFrame.returnAddress());
+      }
+
+      jsIndex--;
+      continue;
+    }
+
+    // If we reach here, there must be a native stack frame and it must be the
+    // greatest frame.
+    if (nativeStackAddr &&
+        // If the latest JS frame was JIT, this could be the native frame that
+        // corresponds to it.
In that case, skip the native frame, because + // there's no need for the same frame to be present twice in the stack. + // The JS frame can be considered the symbolicated version of the native + // frame. + (!jitEndStackAddr || nativeStackAddr < jitEndStackAddr) && + // This might still be a JIT operation, check to make sure that is not + // in range of the NEXT JavaScript's stacks' activation address. + (!jsActivationAddr || nativeStackAddr > jsActivationAddr)) { + MOZ_ASSERT(nativeIndex >= 0); + void* addr = (void*)aNativeStack.mPCs[nativeIndex]; + aCollector.CollectNativeLeafAddr(addr); + } + if (nativeIndex >= 0) { + nativeIndex--; + } + } + + // Update the JS context with the current profile sample buffer generation. + // + // Only do this for periodic samples. We don't want to do this for + // synchronous samples, and we also don't want to do it for calls to + // profiler_suspend_and_sample_thread() from the background hang reporter - + // in that case, aCollector.BufferRangeStart() will return Nothing(). + if (!aIsSynchronous && context && aCollector.BufferRangeStart()) { + uint64_t bufferRangeStart = *aCollector.BufferRangeStart(); + JS::SetJSContextProfilerSampleBufferRangeStart(context, bufferRangeStart); + } +} + +#if defined(GP_OS_windows) && defined(USE_MOZ_STACK_WALK) +static HANDLE GetThreadHandle(PlatformData* aData); +#endif + +#if defined(USE_FRAME_POINTER_STACK_WALK) || defined(USE_MOZ_STACK_WALK) +static void StackWalkCallback(uint32_t aFrameNumber, void* aPC, void* aSP, + void* aClosure) { + NativeStack* nativeStack = static_cast<NativeStack*>(aClosure); + MOZ_ASSERT(nativeStack->mCount < MAX_NATIVE_FRAMES); + nativeStack->mSPs[nativeStack->mCount] = aSP; + nativeStack->mPCs[nativeStack->mCount] = aPC; + nativeStack->mCount++; +} +#endif + +#if defined(USE_FRAME_POINTER_STACK_WALK) +static void DoFramePointerBacktrace(PSLockRef aLock, + const RegisteredThread& aRegisteredThread, + const Registers& aRegs, + NativeStack& aNativeStack) { + // WARNING: this function runs within the profiler's "critical section". + // WARNING: this function might be called while the profiler is inactive, and + // cannot rely on ActivePS. + + // Start with the current function. We use 0 as the frame number here because + // the FramePointerStackWalk() call below will use 1..N. This is a bit weird + // but it doesn't matter because StackWalkCallback() doesn't use the frame + // number argument. + StackWalkCallback(/* frameNum */ 0, aRegs.mPC, aRegs.mSP, &aNativeStack); + + uint32_t maxFrames = uint32_t(MAX_NATIVE_FRAMES - aNativeStack.mCount); + + const void* stackEnd = aRegisteredThread.StackTop(); + if (aRegs.mFP >= aRegs.mSP && aRegs.mFP <= stackEnd) { + FramePointerStackWalk(StackWalkCallback, /* skipFrames */ 0, maxFrames, + &aNativeStack, reinterpret_cast<void**>(aRegs.mFP), + const_cast<void*>(stackEnd)); + } +} +#endif + +#if defined(USE_MOZ_STACK_WALK) +static void DoMozStackWalkBacktrace(PSLockRef aLock, + const RegisteredThread& aRegisteredThread, + const Registers& aRegs, + NativeStack& aNativeStack) { + // WARNING: this function runs within the profiler's "critical section". + // WARNING: this function might be called while the profiler is inactive, and + // cannot rely on ActivePS. + + // Start with the current function. We use 0 as the frame number here because + // the MozStackWalkThread() call below will use 1..N. This is a bit weird but + // it doesn't matter because StackWalkCallback() doesn't use the frame number + // argument. 
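+  //
+  // As with DoFramePointerBacktrace() above, frame 0 is seeded by hand and
+  // the platform walker then appends frames 1..N through the callback. For
+  // the frame-pointer variant, the walker's core idea, stripped of the
+  // stack-bounds checks the real FramePointerStackWalk() performs, is just
+  // following saved-FP/return-address pairs (sketch, assuming the
+  // conventional frame layout; `pcs`, `sps` and `frames` are hypothetical):
+  //
+  //   void** fp = reinterpret_cast<void**>(aRegs.mFP);
+  //   while (fp && frames < maxFrames) {
+  //     pcs[frames] = fp[1];  // return address saved next to the frame ptr
+  //     sps[frames] = fp;     // the frame pointer stands in for this SP
+  //     fp = reinterpret_cast<void**>(fp[0]);  // chain to the caller's frame
+  //     frames++;
+  //   }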
+ StackWalkCallback(/* frameNum */ 0, aRegs.mPC, aRegs.mSP, &aNativeStack); + + uint32_t maxFrames = uint32_t(MAX_NATIVE_FRAMES - aNativeStack.mCount); + + HANDLE thread = GetThreadHandle(aRegisteredThread.GetPlatformData()); + MOZ_ASSERT(thread); + MozStackWalkThread(StackWalkCallback, /* skipFrames */ 0, maxFrames, + &aNativeStack, thread, /* context */ nullptr); +} +#endif + +#ifdef USE_EHABI_STACKWALK +static void DoEHABIBacktrace(PSLockRef aLock, + const RegisteredThread& aRegisteredThread, + const Registers& aRegs, + NativeStack& aNativeStack) { + // WARNING: this function runs within the profiler's "critical section". + // WARNING: this function might be called while the profiler is inactive, and + // cannot rely on ActivePS. + + aNativeStack.mCount = + EHABIStackWalk(aRegs.mContext->uc_mcontext, + const_cast<void*>(aRegisteredThread.StackTop()), + aNativeStack.mSPs, aNativeStack.mPCs, MAX_NATIVE_FRAMES); +} +#endif + +#ifdef USE_LUL_STACKWALK + +// See the comment at the callsite for why this function is necessary. +# if defined(MOZ_HAVE_ASAN_BLACKLIST) +MOZ_ASAN_BLACKLIST static void ASAN_memcpy(void* aDst, const void* aSrc, + size_t aLen) { + // The obvious thing to do here is call memcpy(). However, although + // ASAN_memcpy() is not instrumented by ASAN, memcpy() still is, and the + // false positive still manifests! So we must implement memcpy() ourselves + // within this function. + char* dst = static_cast<char*>(aDst); + const char* src = static_cast<const char*>(aSrc); + + for (size_t i = 0; i < aLen; i++) { + dst[i] = src[i]; + } +} +# endif + +static void DoLULBacktrace(PSLockRef aLock, + const RegisteredThread& aRegisteredThread, + const Registers& aRegs, NativeStack& aNativeStack) { + // WARNING: this function runs within the profiler's "critical section". + // WARNING: this function might be called while the profiler is inactive, and + // cannot rely on ActivePS. 
+ + const mcontext_t* mc = &aRegs.mContext->uc_mcontext; + + lul::UnwindRegs startRegs; + memset(&startRegs, 0, sizeof(startRegs)); + +# if defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_amd64_android) + startRegs.xip = lul::TaggedUWord(mc->gregs[REG_RIP]); + startRegs.xsp = lul::TaggedUWord(mc->gregs[REG_RSP]); + startRegs.xbp = lul::TaggedUWord(mc->gregs[REG_RBP]); +# elif defined(GP_PLAT_amd64_freebsd) + startRegs.xip = lul::TaggedUWord(mc->mc_rip); + startRegs.xsp = lul::TaggedUWord(mc->mc_rsp); + startRegs.xbp = lul::TaggedUWord(mc->mc_rbp); +# elif defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android) + startRegs.r15 = lul::TaggedUWord(mc->arm_pc); + startRegs.r14 = lul::TaggedUWord(mc->arm_lr); + startRegs.r13 = lul::TaggedUWord(mc->arm_sp); + startRegs.r12 = lul::TaggedUWord(mc->arm_ip); + startRegs.r11 = lul::TaggedUWord(mc->arm_fp); + startRegs.r7 = lul::TaggedUWord(mc->arm_r7); +# elif defined(GP_PLAT_arm64_linux) || defined(GP_PLAT_arm64_android) + startRegs.pc = lul::TaggedUWord(mc->pc); + startRegs.x29 = lul::TaggedUWord(mc->regs[29]); + startRegs.x30 = lul::TaggedUWord(mc->regs[30]); + startRegs.sp = lul::TaggedUWord(mc->sp); +# elif defined(GP_PLAT_arm64_freebsd) + startRegs.pc = lul::TaggedUWord(mc->mc_gpregs.gp_elr); + startRegs.x29 = lul::TaggedUWord(mc->mc_gpregs.gp_x[29]); + startRegs.x30 = lul::TaggedUWord(mc->mc_gpregs.gp_lr); + startRegs.sp = lul::TaggedUWord(mc->mc_gpregs.gp_sp); +# elif defined(GP_PLAT_x86_linux) || defined(GP_PLAT_x86_android) + startRegs.xip = lul::TaggedUWord(mc->gregs[REG_EIP]); + startRegs.xsp = lul::TaggedUWord(mc->gregs[REG_ESP]); + startRegs.xbp = lul::TaggedUWord(mc->gregs[REG_EBP]); +# elif defined(GP_PLAT_mips64_linux) + startRegs.pc = lul::TaggedUWord(mc->pc); + startRegs.sp = lul::TaggedUWord(mc->gregs[29]); + startRegs.fp = lul::TaggedUWord(mc->gregs[30]); +# else +# error "Unknown plat" +# endif + + // Copy up to N_STACK_BYTES from rsp-REDZONE upwards, but not going past the + // stack's registered top point. Do some basic sanity checks too. This + // assumes that the TaggedUWord holding the stack pointer value is valid, but + // it should be, since it was constructed that way in the code just above. + + // We could construct |stackImg| so that LUL reads directly from the stack in + // question, rather than from a copy of it. That would reduce overhead and + // space use a bit. However, it gives a problem with dynamic analysis tools + // (ASan, TSan, Valgrind) which is that such tools will report invalid or + // racing memory accesses, and such accesses will be reported deep inside LUL. + // By taking a copy here, we can either sanitise the copy (for Valgrind) or + // copy it using an unchecked memcpy (for ASan, TSan). That way we don't have + // to try and suppress errors inside LUL. + // + // N_STACK_BYTES is set to 160KB. This is big enough to hold all stacks + // observed in some minutes of testing, whilst keeping the size of this + // function (DoNativeBacktrace)'s frame reasonable. Most stacks observed in + // practice are small, 4KB or less, and so the copy costs are insignificant + // compared to other profiler overhead. + // + // |stackImg| is allocated on this (the sampling thread's) stack. That + // implies that the frame for this function is at least N_STACK_BYTES large. + // In general it would be considered unacceptable to have such a large frame + // on a stack, but it only exists for the unwinder thread, and so is not + // expected to be a problem. 
Allocating it on the heap is troublesome because + // this function runs whilst the sampled thread is suspended, so any heap + // allocation risks deadlock. Allocating it as a global variable is not + // thread safe, which would be a problem if we ever allow multiple sampler + // threads. Hence allocating it on the stack seems to be the least-worst + // option. + + lul::StackImage stackImg; + + { +# if defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_amd64_android) || \ + defined(GP_PLAT_amd64_freebsd) + uintptr_t rEDZONE_SIZE = 128; + uintptr_t start = startRegs.xsp.Value() - rEDZONE_SIZE; +# elif defined(GP_PLAT_arm_linux) || defined(GP_PLAT_arm_android) + uintptr_t rEDZONE_SIZE = 0; + uintptr_t start = startRegs.r13.Value() - rEDZONE_SIZE; +# elif defined(GP_PLAT_arm64_linux) || defined(GP_PLAT_arm64_android) || \ + defined(GP_PLAT_arm64_freebsd) + uintptr_t rEDZONE_SIZE = 0; + uintptr_t start = startRegs.sp.Value() - rEDZONE_SIZE; +# elif defined(GP_PLAT_x86_linux) || defined(GP_PLAT_x86_android) + uintptr_t rEDZONE_SIZE = 0; + uintptr_t start = startRegs.xsp.Value() - rEDZONE_SIZE; +# elif defined(GP_PLAT_mips64_linux) + uintptr_t rEDZONE_SIZE = 0; + uintptr_t start = startRegs.sp.Value() - rEDZONE_SIZE; +# else +# error "Unknown plat" +# endif + uintptr_t end = reinterpret_cast<uintptr_t>(aRegisteredThread.StackTop()); + uintptr_t ws = sizeof(void*); + start &= ~(ws - 1); + end &= ~(ws - 1); + uintptr_t nToCopy = 0; + if (start < end) { + nToCopy = end - start; + if (nToCopy > lul::N_STACK_BYTES) nToCopy = lul::N_STACK_BYTES; + } + MOZ_ASSERT(nToCopy <= lul::N_STACK_BYTES); + stackImg.mLen = nToCopy; + stackImg.mStartAvma = start; + if (nToCopy > 0) { + // If this is a vanilla memcpy(), ASAN makes the following complaint: + // + // ERROR: AddressSanitizer: stack-buffer-underflow ... + // ... + // HINT: this may be a false positive if your program uses some custom + // stack unwind mechanism or swapcontext + // + // This code is very much a custom stack unwind mechanism! So we use an + // alternative memcpy() implementation that is ignored by ASAN. +# if defined(MOZ_HAVE_ASAN_BLACKLIST) + ASAN_memcpy(&stackImg.mContents[0], (void*)start, nToCopy); +# else + memcpy(&stackImg.mContents[0], (void*)start, nToCopy); +# endif + (void)VALGRIND_MAKE_MEM_DEFINED(&stackImg.mContents[0], nToCopy); + } + } + + size_t framePointerFramesAcquired = 0; + lul::LUL* lul = CorePS::Lul(aLock); + lul->Unwind(reinterpret_cast<uintptr_t*>(aNativeStack.mPCs), + reinterpret_cast<uintptr_t*>(aNativeStack.mSPs), + &aNativeStack.mCount, &framePointerFramesAcquired, + MAX_NATIVE_FRAMES, &startRegs, &stackImg); + + // Update stats in the LUL stats object. Unfortunately this requires + // three global memory operations. + lul->mStats.mContext += 1; + lul->mStats.mCFI += aNativeStack.mCount - 1 - framePointerFramesAcquired; + lul->mStats.mFP += framePointerFramesAcquired; +} + +#endif + +#ifdef HAVE_NATIVE_UNWIND +static void DoNativeBacktrace(PSLockRef aLock, + const RegisteredThread& aRegisteredThread, + const Registers& aRegs, + NativeStack& aNativeStack) { + // This method determines which stackwalker is used for periodic and + // synchronous samples. (Backtrace samples are treated differently, see + // profiler_suspend_and_sample_thread() for details). The only part of the + // ordering that matters is that LUL must precede FRAME_POINTER, because on + // Linux they can both be present. 
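+  //
+  // For the LUL branch selected below, note how DoLULBacktrace() above bounds
+  // the copied stack image: the copy starts slightly below SP (to include any
+  // red zone), is word-aligned, and is capped at lul::N_STACK_BYTES. A
+  // condensed model of that computation (hypothetical variable names):
+  //
+  //   uintptr_t start = sp - redzone;  // redzone: 128 on x86-64, else 0 here
+  //   uintptr_t end = stackTop;        // the thread's registered stack top
+  //   start &= ~(sizeof(void*) - 1);   // round both down to word alignment
+  //   end &= ~(sizeof(void*) - 1);
+  //   size_t n = start < end
+  //                  ? std::min<size_t>(end - start, lul::N_STACK_BYTES)
+  //                  : 0;
+  //
+  // so a live stack of a few KiB is copied in full, while a deeper stack is
+  // truncated to the 160 KiB closest to the sampled SP (the youngest frames).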
+# if defined(USE_LUL_STACKWALK) + DoLULBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack); +# elif defined(USE_EHABI_STACKWALK) + DoEHABIBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack); +# elif defined(USE_FRAME_POINTER_STACK_WALK) + DoFramePointerBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack); +# elif defined(USE_MOZ_STACK_WALK) + DoMozStackWalkBacktrace(aLock, aRegisteredThread, aRegs, aNativeStack); +# else +# error "Invalid configuration" +# endif +} +#endif + +// Writes some components shared by periodic and synchronous profiles to +// ActivePS's ProfileBuffer. (This should only be called from DoSyncSample() +// and DoPeriodicSample().) +// +// The grammar for entry sequences is in a comment above +// ProfileBuffer::StreamSamplesToJSON. +static inline void DoSharedSample(PSLockRef aLock, bool aIsSynchronous, + RegisteredThread& aRegisteredThread, + const Registers& aRegs, uint64_t aSamplePos, + ProfileBuffer& aBuffer) { + // WARNING: this function runs within the profiler's "critical section". + + MOZ_ASSERT(!aBuffer.IsThreadSafe(), + "Mutexes cannot be used inside this critical section"); + + MOZ_RELEASE_ASSERT(ActivePS::Exists(aLock)); + + ProfileBufferCollector collector(aBuffer, aSamplePos); + NativeStack nativeStack; +#if defined(HAVE_NATIVE_UNWIND) + if (ActivePS::FeatureStackWalk(aLock)) { + DoNativeBacktrace(aLock, aRegisteredThread, aRegs, nativeStack); + + MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aRegisteredThread, + aRegs, nativeStack, collector, CorePS::JsFrames(aLock)); + } else +#endif + { + MergeStacks(ActivePS::Features(aLock), aIsSynchronous, aRegisteredThread, + aRegs, nativeStack, collector, CorePS::JsFrames(aLock)); + + // We can't walk the whole native stack, but we can record the top frame. + if (ActivePS::FeatureLeaf(aLock)) { + aBuffer.AddEntry(ProfileBufferEntry::NativeLeafAddr((void*)aRegs.mPC)); + } + } +} + +// Writes the components of a synchronous sample to the given ProfileBuffer. +static void DoSyncSample(PSLockRef aLock, RegisteredThread& aRegisteredThread, + const TimeStamp& aNow, const Registers& aRegs, + ProfileBuffer& aBuffer) { + // WARNING: this function runs within the profiler's "critical section". + + uint64_t samplePos = + aBuffer.AddThreadIdEntry(aRegisteredThread.Info()->ThreadId()); + + TimeDuration delta = aNow - CorePS::ProcessStartTime(); + aBuffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds())); + + DoSharedSample(aLock, /* aIsSynchronous = */ true, aRegisteredThread, aRegs, + samplePos, aBuffer); +} + +// Writes the components of a periodic sample to ActivePS's ProfileBuffer. +// The ThreadId entry is already written in the main ProfileBuffer, its location +// is `aSamplePos`, we can write the rest to `aBuffer` (which may be different). +static inline void DoPeriodicSample(PSLockRef aLock, + RegisteredThread& aRegisteredThread, + const Registers& aRegs, uint64_t aSamplePos, + ProfileBuffer& aBuffer) { + // WARNING: this function runs within the profiler's "critical section". + + DoSharedSample(aLock, /* aIsSynchronous = */ false, aRegisteredThread, aRegs, + aSamplePos, aBuffer); +} + +// END sampling/unwinding code +//////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////// +// BEGIN saving/streaming code + +const static uint64_t kJS_MAX_SAFE_UINTEGER = +9007199254740991ULL; + +static int64_t SafeJSInteger(uint64_t aValue) { + return aValue <= kJS_MAX_SAFE_UINTEGER ? 
int64_t(aValue) : -1; +} + +static void AddSharedLibraryInfoToStream(JSONWriter& aWriter, + const SharedLibrary& aLib) { + aWriter.StartObjectElement(); + aWriter.IntProperty("start", SafeJSInteger(aLib.GetStart())); + aWriter.IntProperty("end", SafeJSInteger(aLib.GetEnd())); + aWriter.IntProperty("offset", SafeJSInteger(aLib.GetOffset())); + aWriter.StringProperty("name", NS_ConvertUTF16toUTF8(aLib.GetModuleName())); + aWriter.StringProperty("path", NS_ConvertUTF16toUTF8(aLib.GetModulePath())); + aWriter.StringProperty("debugName", + NS_ConvertUTF16toUTF8(aLib.GetDebugName())); + aWriter.StringProperty("debugPath", + NS_ConvertUTF16toUTF8(aLib.GetDebugPath())); + aWriter.StringProperty("breakpadId", aLib.GetBreakpadId()); + aWriter.StringProperty("arch", aLib.GetArch()); + aWriter.EndObject(); +} + +void AppendSharedLibraries(JSONWriter& aWriter) { + SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf(); + info.SortByAddress(); + for (size_t i = 0; i < info.GetSize(); i++) { + AddSharedLibraryInfoToStream(aWriter, info.GetEntry(i)); + } +} + +#ifdef MOZ_TASK_TRACER +static void StreamNameAndThreadId(JSONWriter& aWriter, const char* aName, + int aThreadId) { + aWriter.StartObjectElement(); + { + if (XRE_GetProcessType() == GeckoProcessType_Plugin) { + // TODO Add the proper plugin name + aWriter.StringProperty("name", "Plugin"); + } else { + aWriter.StringProperty("name", aName); + } + aWriter.IntProperty("tid", aThreadId); + } + aWriter.EndObject(); +} +#endif + +static void StreamTaskTracer(PSLockRef aLock, SpliceableJSONWriter& aWriter) { +#ifdef MOZ_TASK_TRACER + MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock)); + + aWriter.StartArrayProperty("data"); + { + UniquePtr<Vector<nsCString>> data = + tasktracer::GetLoggedData(CorePS::ProcessStartTime()); + for (const nsCString& dataString : *data) { + aWriter.StringElement(dataString.get()); + } + } + aWriter.EndArray(); + + aWriter.StartArrayProperty("threads"); + { + ActivePS::DiscardExpiredDeadProfiledThreads(aLock); + Vector<std::pair<RegisteredThread*, ProfiledThreadData*>> threads = + ActivePS::ProfiledThreads(aLock); + for (auto& thread : threads) { + RefPtr<ThreadInfo> info = thread.second->Info(); + StreamNameAndThreadId(aWriter, info->Name(), info->ThreadId()); + } + } + aWriter.EndArray(); + + aWriter.DoubleProperty("start", + static_cast<double>(tasktracer::GetStartTime())); +#endif +} + +static void StreamCategories(SpliceableJSONWriter& aWriter) { + // Same order as ProfilingCategory. Format: + // [ + // { + // name: "Idle", + // color: "transparent", + // subcategories: ["Other"], + // }, + // { + // name: "Other", + // color: "grey", + // subcategories: [ + // "JSM loading", + // "Subprocess launching", + // "DLL loading" + // ] + // }, + // ... 
+ // ] + +#define CATEGORY_JSON_BEGIN_CATEGORY(name, labelAsString, color) \ + aWriter.Start(); \ + aWriter.StringProperty("name", labelAsString); \ + aWriter.StringProperty("color", color); \ + aWriter.StartArrayProperty("subcategories"); +#define CATEGORY_JSON_SUBCATEGORY(supercategory, name, labelAsString) \ + aWriter.StringElement(labelAsString); +#define CATEGORY_JSON_END_CATEGORY \ + aWriter.EndArray(); \ + aWriter.EndObject(); + + MOZ_PROFILING_CATEGORY_LIST(CATEGORY_JSON_BEGIN_CATEGORY, + CATEGORY_JSON_SUBCATEGORY, + CATEGORY_JSON_END_CATEGORY) + +#undef CATEGORY_JSON_BEGIN_CATEGORY +#undef CATEGORY_JSON_SUBCATEGORY +#undef CATEGORY_JSON_END_CATEGORY +} + +static void StreamMarkerSchema(SpliceableJSONWriter& aWriter) { + // Get an array view with all registered marker-type-specific functions. + Span<const base_profiler_markers_detail::Streaming::MarkerTypeFunctions> + markerTypeFunctionsArray = + base_profiler_markers_detail::Streaming::MarkerTypeFunctionsArray(); + // List of streamed marker names, this is used to spot duplicates. + std::set<std::string> names; + // Stream the display schema for each different one. (Duplications may come + // from the same code potentially living in different libraries.) + for (const auto& markerTypeFunctions : markerTypeFunctionsArray) { + auto name = markerTypeFunctions.mMarkerTypeNameFunction(); + // std::set.insert(T&&) returns a pair, its `second` is true if the element + // was actually inserted (i.e., it was not there yet.) + const bool didInsert = + names.insert(std::string(name.data(), name.size())).second; + if (didInsert) { + markerTypeFunctions.mMarkerSchemaFunction().Stream(aWriter, name); + } + } +} + +// Some meta information that is better recorded before streaming the profile. +// This is *not* intended to be cached, as some values could change between +// profiling sessions. +struct PreRecordedMetaInformation { + bool mAsyncStacks; + + // This struct should only live on the stack, so it's fine to use Auto + // strings. + nsAutoCString mHttpPlatform; + nsAutoCString mHttpOscpu; + nsAutoCString mHttpMisc; + + nsAutoCString mRuntimeABI; + nsAutoCString mRuntimeToolkit; + + nsAutoCString mAppInfoProduct; + nsAutoCString mAppInfoAppBuildID; + nsAutoCString mAppInfoSourceURL; + + int32_t mProcessInfoCpuCount; + int32_t mProcessInfoCpuCores; +}; + +// This function should be called out of the profiler lock. +// It gathers non-trivial data that doesn't require the profiler to stop, or for +// which the request could theoretically deadlock if the profiler is locked. +static PreRecordedMetaInformation PreRecordMetaInformation() { + gPSMutex.AssertCurrentThreadDoesNotOwn(); + + PreRecordedMetaInformation info = {}; // Aggregate-init all fields. + + if (!NS_IsMainThread()) { + // Leave these properties out if we're not on the main thread. + // At the moment, the only case in which this function is called on a + // background thread is if we're in a content process and are going to + // send this profile to the parent process. In that case, the parent + // process profile's "meta" object already has the rest of the properties, + // and the parent process profile is dumped on that process's main thread. 
+ return info; + } + + info.mAsyncStacks = Preferences::GetBool("javascript.options.asyncstack"); + + nsresult res; + + if (nsCOMPtr<nsIHttpProtocolHandler> http = + do_GetService(NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX "http", &res); + !NS_FAILED(res) && http) { + Unused << http->GetPlatform(info.mHttpPlatform); + Unused << http->GetOscpu(info.mHttpOscpu); + Unused << http->GetMisc(info.mHttpMisc); + } + + if (nsCOMPtr<nsIXULRuntime> runtime = + do_GetService("@mozilla.org/xre/runtime;1"); + runtime) { + Unused << runtime->GetXPCOMABI(info.mRuntimeABI); + Unused << runtime->GetWidgetToolkit(info.mRuntimeToolkit); + } + + if (nsCOMPtr<nsIXULAppInfo> appInfo = + do_GetService("@mozilla.org/xre/app-info;1"); + appInfo) { + Unused << appInfo->GetName(info.mAppInfoProduct); + Unused << appInfo->GetAppBuildID(info.mAppInfoAppBuildID); + Unused << appInfo->GetSourceURL(info.mAppInfoSourceURL); + } + + ProcessInfo processInfo = {}; // Aggregate-init all fields to false/zeroes. + if (NS_SUCCEEDED(CollectProcessInfo(processInfo))) { + info.mProcessInfoCpuCount = processInfo.cpuCount; + info.mProcessInfoCpuCores = processInfo.cpuCores; + } + + return info; +} + +// Implemented in platform-specific cpps, to add object properties describing +// the units of CPU measurements in samples. +static void StreamMetaPlatformSampleUnits(PSLockRef aLock, + SpliceableJSONWriter& aWriter); + +static void StreamMetaJSCustomObject( + PSLockRef aLock, SpliceableJSONWriter& aWriter, bool aIsShuttingDown, + const PreRecordedMetaInformation& aPreRecordedMetaInformation) { + MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock)); + + aWriter.IntProperty("version", 22); + + // The "startTime" field holds the number of milliseconds since midnight + // January 1, 1970 GMT. This grotty code computes (Now - (Now - + // ProcessStartTime)) to convert CorePS::ProcessStartTime() into that form. + TimeDuration delta = TimeStamp::NowUnfuzzed() - CorePS::ProcessStartTime(); + aWriter.DoubleProperty( + "startTime", + static_cast<double>(PR_Now() / 1000.0 - delta.ToMilliseconds())); + + // Write the shutdownTime field. Unlike startTime, shutdownTime is not an + // absolute time stamp: It's relative to startTime. This is consistent with + // all other (non-"startTime") times anywhere in the profile JSON. + if (aIsShuttingDown) { + aWriter.DoubleProperty("shutdownTime", profiler_time()); + } else { + aWriter.NullProperty("shutdownTime"); + } + + aWriter.StartArrayProperty("categories"); + StreamCategories(aWriter); + aWriter.EndArray(); + + aWriter.StartArrayProperty("markerSchema"); + StreamMarkerSchema(aWriter); + aWriter.EndArray(); + + ActivePS::WriteActiveConfiguration(aLock, aWriter, + MakeStringSpan("configuration")); + + if (!NS_IsMainThread()) { + // Leave the rest of the properties out if we're not on the main thread. + // At the moment, the only case in which this function is called on a + // background thread is if we're in a content process and are going to + // send this profile to the parent process. In that case, the parent + // process profile's "meta" object already has the rest of the properties, + // and the parent process profile is dumped on that process's main thread. + return; + } + + aWriter.DoubleProperty("interval", ActivePS::Interval(aLock)); + aWriter.IntProperty("stackwalk", ActivePS::FeatureStackWalk(aLock)); + +#ifdef DEBUG + aWriter.IntProperty("debug", 1); +#else + aWriter.IntProperty("debug", 0); +#endif + + aWriter.IntProperty("gcpoison", JS::IsGCPoisoning() ? 
1 : 0); + + aWriter.IntProperty("asyncstack", aPreRecordedMetaInformation.mAsyncStacks); + + aWriter.IntProperty("processType", XRE_GetProcessType()); + + aWriter.StringProperty("updateChannel", MOZ_STRINGIFY(MOZ_UPDATE_CHANNEL)); + + if (!aPreRecordedMetaInformation.mHttpPlatform.IsEmpty()) { + aWriter.StringProperty("platform", + aPreRecordedMetaInformation.mHttpPlatform); + } + if (!aPreRecordedMetaInformation.mHttpOscpu.IsEmpty()) { + aWriter.StringProperty("oscpu", aPreRecordedMetaInformation.mHttpOscpu); + } + if (!aPreRecordedMetaInformation.mHttpMisc.IsEmpty()) { + aWriter.StringProperty("misc", aPreRecordedMetaInformation.mHttpMisc); + } + + if (!aPreRecordedMetaInformation.mRuntimeABI.IsEmpty()) { + aWriter.StringProperty("abi", aPreRecordedMetaInformation.mRuntimeABI); + } + if (!aPreRecordedMetaInformation.mRuntimeToolkit.IsEmpty()) { + aWriter.StringProperty("toolkit", + aPreRecordedMetaInformation.mRuntimeToolkit); + } + + if (!aPreRecordedMetaInformation.mAppInfoProduct.IsEmpty()) { + aWriter.StringProperty("product", + aPreRecordedMetaInformation.mAppInfoProduct); + } + if (!aPreRecordedMetaInformation.mAppInfoAppBuildID.IsEmpty()) { + aWriter.StringProperty("appBuildID", + aPreRecordedMetaInformation.mAppInfoAppBuildID); + } + if (!aPreRecordedMetaInformation.mAppInfoSourceURL.IsEmpty()) { + aWriter.StringProperty("sourceURL", + aPreRecordedMetaInformation.mAppInfoSourceURL); + } + + if (aPreRecordedMetaInformation.mProcessInfoCpuCores > 0) { + aWriter.IntProperty("physicalCPUs", + aPreRecordedMetaInformation.mProcessInfoCpuCores); + } + if (aPreRecordedMetaInformation.mProcessInfoCpuCount > 0) { + aWriter.IntProperty("logicalCPUs", + aPreRecordedMetaInformation.mProcessInfoCpuCount); + } + + aWriter.StartObjectProperty("sampleUnits"); + { + aWriter.StringProperty("time", "ms"); + aWriter.StringProperty("eventDelay", "ms"); + StreamMetaPlatformSampleUnits(aLock, aWriter); + } + aWriter.EndObject(); + + // We should avoid collecting extension metadata for profiler while XPCOM is + // shutting down since it cannot create a new ExtensionPolicyService. 
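+  //
+  // The block below emits the extension list as a small table: a schema
+  // object mapping column names to indices, followed by one row per installed
+  // extension. The result looks roughly like this (values are illustrative
+  // only):
+  //
+  //   "extensions": {
+  //     "schema": {"id": 0, "name": 1, "baseURL": 2},
+  //     "data": [
+  //       ["some-extension@example.com", "Some Extension",
+  //        "moz-extension://<uuid>/"]
+  //     ]
+  //   }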
+ if (!gXPCOMShuttingDown) { + aWriter.StartObjectProperty("extensions"); + { + { + JSONSchemaWriter schema(aWriter); + schema.WriteField("id"); + schema.WriteField("name"); + schema.WriteField("baseURL"); + } + + aWriter.StartArrayProperty("data"); + { + nsTArray<RefPtr<WebExtensionPolicy>> exts; + ExtensionPolicyService::GetSingleton().GetAll(exts); + + for (auto& ext : exts) { + aWriter.StartArrayElement(JSONWriter::SingleLineStyle); + + nsAutoString id; + ext->GetId(id); + aWriter.StringElement(NS_ConvertUTF16toUTF8(id)); + + aWriter.StringElement(NS_ConvertUTF16toUTF8(ext->Name())); + + auto url = ext->GetURL(u""_ns); + if (url.isOk()) { + aWriter.StringElement(NS_ConvertUTF16toUTF8(url.unwrap())); + } + + aWriter.EndArray(); + } + } + aWriter.EndArray(); + } + aWriter.EndObject(); + } +} + +static void StreamPages(PSLockRef aLock, SpliceableJSONWriter& aWriter) { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + ActivePS::DiscardExpiredPages(aLock); + for (const auto& page : ActivePS::ProfiledPages(aLock)) { + page->StreamJSON(aWriter); + } +} + +#if defined(GP_OS_android) +template <int N> +static bool StartsWith(const nsACString& string, const char (&prefix)[N]) { + if (N - 1 > string.Length()) { + return false; + } + return memcmp(string.Data(), prefix, N - 1) == 0; +} + +static JS::ProfilingCategoryPair InferJavaCategory(nsACString& aName) { + if (aName.EqualsLiteral("android.os.MessageQueue.nativePollOnce()")) { + return JS::ProfilingCategoryPair::IDLE; + } + if (aName.EqualsLiteral("java.lang.Object.wait()")) { + return JS::ProfilingCategoryPair::JAVA_BLOCKED; + } + if (StartsWith(aName, "android.") || StartsWith(aName, "com.android.")) { + return JS::ProfilingCategoryPair::JAVA_ANDROID; + } + if (StartsWith(aName, "mozilla.") || StartsWith(aName, "org.mozilla.")) { + return JS::ProfilingCategoryPair::JAVA_MOZILLA; + } + if (StartsWith(aName, "java.") || StartsWith(aName, "sun.") || + StartsWith(aName, "com.sun.")) { + return JS::ProfilingCategoryPair::JAVA_LANGUAGE; + } + if (StartsWith(aName, "kotlin.") || StartsWith(aName, "kotlinx.")) { + return JS::ProfilingCategoryPair::JAVA_KOTLIN; + } + if (StartsWith(aName, "androidx.")) { + return JS::ProfilingCategoryPair::JAVA_ANDROIDX; + } + return JS::ProfilingCategoryPair::OTHER; +} + +static void CollectJavaThreadProfileData(ProfileBuffer& aProfileBuffer) { + // locked_profiler_start uses sample count is 1000 for Java thread. + // This entry size is enough now, but we might have to estimate it + // if we can customize it + + // Pass the samples + // FIXME(bug 1618560): We are currently only profiling the Android UI thread. + constexpr int threadId = 0; + int sampleId = 0; + while (true) { + // Gets the data from the Android UI thread only. + double sampleTime = java::GeckoJavaSampler::GetSampleTime(sampleId); + if (sampleTime == 0.0) { + break; + } + + aProfileBuffer.AddThreadIdEntry(threadId); + aProfileBuffer.AddEntry(ProfileBufferEntry::Time(sampleTime)); + int frameId = 0; + while (true) { + jni::String::LocalRef frameName = + java::GeckoJavaSampler::GetFrameName(sampleId, frameId++); + if (!frameName) { + break; + } + nsCString frameNameString = frameName->ToCString(); + + auto categoryPair = InferJavaCategory(frameNameString); + aProfileBuffer.CollectCodeLocation("", frameNameString.get(), 0, 0, + Nothing(), Nothing(), + Some(categoryPair)); + } + sampleId++; + } + + // Pass the markers now + while (true) { + // Gets the data from the Android UI thread only. 
+ java::GeckoJavaSampler::Marker::LocalRef marker = + java::GeckoJavaSampler::PollNextMarker(); + if (!marker) { + // All markers are transferred. + break; + } + + // Get all the marker information from the Java thread using JNI. + nsCString markerName = marker->GetMarkerName()->ToCString(); + jni::String::LocalRef text = marker->GetMarkerText(); + TimeStamp startTime = + CorePS::ProcessStartTime() + + TimeDuration::FromMilliseconds(marker->GetStartTime()); + + double endTimeMs = marker->GetEndTime(); + // A marker can be either a duration with start and end, or a point in time + // with only startTime. If endTime is 0, this means it's a point in time. + TimeStamp endTime = endTimeMs == 0 + ? startTime + : CorePS::ProcessStartTime() + + TimeDuration::FromMilliseconds(endTimeMs); + MarkerTiming timing = endTimeMs == 0 + ? MarkerTiming::InstantAt(startTime) + : MarkerTiming::Interval(startTime, endTime); + + if (!text) { + // This marker doesn't have a text. + AddMarkerToBuffer(aProfileBuffer.UnderlyingChunkedBuffer(), markerName, + geckoprofiler::category::JAVA_ANDROID, + {MarkerThreadId(threadId), std::move(timing)}); + } else { + // This marker has a text. + AddMarkerToBuffer(aProfileBuffer.UnderlyingChunkedBuffer(), markerName, + geckoprofiler::category::JAVA_ANDROID, + {MarkerThreadId(threadId), std::move(timing)}, + geckoprofiler::markers::TextMarker{}, + text->ToCString()); + } + } +} +#endif + +UniquePtr<ProfilerCodeAddressService> +profiler_code_address_service_for_presymbolication() { + static const bool preSymbolicate = []() { + const char* symbolicate = getenv("MOZ_PROFILER_SYMBOLICATE"); + return symbolicate && symbolicate[0] != '\0'; + }(); + return preSymbolicate ? MakeUnique<ProfilerCodeAddressService>() : nullptr; +} + +static void locked_profiler_stream_json_for_this_process( + PSLockRef aLock, SpliceableJSONWriter& aWriter, double aSinceTime, + const PreRecordedMetaInformation& aPreRecordedMetaInformation, + bool aIsShuttingDown, ProfilerCodeAddressService* aService) { + LOG("locked_profiler_stream_json_for_this_process"); + + MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock)); + + AUTO_PROFILER_STATS(locked_profiler_stream_json_for_this_process); + + const double collectionStartMs = profiler_time(); + + ProfileBuffer& buffer = ActivePS::Buffer(aLock); + + // If there is a set "Window length", discard older data. + Maybe<double> durationS = ActivePS::Duration(aLock); + if (durationS.isSome()) { + const double durationStartMs = collectionStartMs - *durationS * 1000; + buffer.DiscardSamplesBeforeTime(durationStartMs); + } + + // Put shared library info + aWriter.StartArrayProperty("libs"); + AppendSharedLibraries(aWriter); + aWriter.EndArray(); + + // Put meta data + aWriter.StartObjectProperty("meta"); + { + StreamMetaJSCustomObject(aLock, aWriter, aIsShuttingDown, + aPreRecordedMetaInformation); + } + aWriter.EndObject(); + + // Put page data + aWriter.StartArrayProperty("pages"); + { StreamPages(aLock, aWriter); } + aWriter.EndArray(); + + buffer.StreamProfilerOverheadToJSON(aWriter, CorePS::ProcessStartTime(), + aSinceTime); + buffer.StreamCountersToJSON(aWriter, CorePS::ProcessStartTime(), aSinceTime); + + // Data of TaskTracer doesn't belong in the circular buffer. 
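+  //
+  // (For orientation: together with the properties already written above, the
+  // per-process JSON assembled by this function ends up with roughly this
+  // top-level shape; property names are taken from the Start*Property calls
+  // in this function, and the optional parts depend on the active features:
+  //
+  //   "libs": [...], "meta": {...}, "pages": [...],
+  //   ...profiler overhead and counter data...,
+  //   "tasktracer": {...},          // only with the tasktracer feature
+  //   "threads": [...],
+  //   "jsTracerDictionary": [...],  // only with the jstracer feature
+  //   "pausedRanges": [...]
+  // )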
+ if (ActivePS::FeatureTaskTracer(aLock)) { + aWriter.StartObjectProperty("tasktracer"); + StreamTaskTracer(aLock, aWriter); + aWriter.EndObject(); + } + + // Lists the samples for each thread profile + aWriter.StartArrayProperty("threads"); + { + ActivePS::DiscardExpiredDeadProfiledThreads(aLock); + Vector<std::pair<RegisteredThread*, ProfiledThreadData*>> threads = + ActivePS::ProfiledThreads(aLock); + for (auto& thread : threads) { + RegisteredThread* registeredThread = thread.first; + JSContext* cx = + registeredThread ? registeredThread->GetJSContext() : nullptr; + ProfiledThreadData* profiledThreadData = thread.second; + profiledThreadData->StreamJSON( + buffer, cx, aWriter, CorePS::ProcessName(aLock), + CorePS::ETLDplus1(aLock), CorePS::ProcessStartTime(), aSinceTime, + ActivePS::FeatureJSTracer(aLock), aService); + } + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(aLock)) { + // We are allocating it chunk by chunk. So this will not allocate 64 MiB + // at once. This size should be more than enough for java threads. + // This buffer is being created for each process but Android has + // relatively less processes compared to desktop, so it's okay here. + mozilla::ProfileBufferChunkManagerWithLocalLimit chunkManager( + 64 * 1024 * 1024, 1024 * 1024); + ProfileChunkedBuffer bufferManager( + ProfileChunkedBuffer::ThreadSafety::WithoutMutex, chunkManager); + ProfileBuffer javaBuffer(bufferManager); + CollectJavaThreadProfileData(javaBuffer); + + // Set the thread id of the Android UI thread to be 0. + // We are profiling the Android UI thread twice: Both from the C++ side + // (as a regular C++ profiled thread with the name "AndroidUI"), and from + // the Java side. The thread's actual ID is mozilla::jni::GetUIThreadId(), + // but since we're using that ID for the C++ side, we need to pick another + // tid that doesn't conflict with it for the Java side. So we just use 0. + // Once we add support for profiling of other java threads, we'll have to + // get their thread id and name via JNI. + RefPtr<ThreadInfo> threadInfo = new ThreadInfo( + "AndroidUI (JVM)", 0, false, CorePS::ProcessStartTime()); + ProfiledThreadData profiledThreadData(threadInfo, nullptr); + profiledThreadData.StreamJSON( + javaBuffer, nullptr, aWriter, CorePS::ProcessName(aLock), + CorePS::ETLDplus1(aLock), CorePS::ProcessStartTime(), aSinceTime, + ActivePS::FeatureJSTracer(aLock), nullptr); + } +#endif + + UniquePtr<char[]> baseProfileThreads = + ActivePS::MoveBaseProfileThreads(aLock); + if (baseProfileThreads) { + aWriter.Splice(MakeStringSpan(baseProfileThreads.get())); + } + } + aWriter.EndArray(); + + if (ActivePS::FeatureJSTracer(aLock)) { + aWriter.StartArrayProperty("jsTracerDictionary"); + { + JS::AutoTraceLoggerLockGuard lockGuard; + // Collect Event Dictionary + JS::TraceLoggerDictionaryBuffer collectionBuffer(lockGuard); + while (collectionBuffer.NextChunk()) { + aWriter.StringElement( + MakeStringSpan(collectionBuffer.internalBuffer())); + } + } + aWriter.EndArray(); + } + + aWriter.StartArrayProperty("pausedRanges"); + { buffer.StreamPausedRangesToJSON(aWriter, aSinceTime); } + aWriter.EndArray(); + + const double collectionEndMs = profiler_time(); + + // Record timestamps for the collection into the buffer, so that consumers + // know why we didn't collect any samples for its duration. + // We put these entries into the buffer after we've collected the profile, + // so they'll be visible for the *next* profile collection (if they haven't + // been overwritten due to buffer wraparound by then). 
+  buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStartMs));
+  buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEndMs));
+}
+
+bool profiler_stream_json_for_this_process(
+    SpliceableJSONWriter& aWriter, double aSinceTime, bool aIsShuttingDown,
+    ProfilerCodeAddressService* aService) {
+  LOG("profiler_stream_json_for_this_process");
+
+  MOZ_RELEASE_ASSERT(CorePS::Exists());
+
+  const auto preRecordedMetaInformation = PreRecordMetaInformation();
+
+  PSAutoLock lock(gPSMutex);
+
+  if (!ActivePS::Exists(lock)) {
+    return false;
+  }
+
+  locked_profiler_stream_json_for_this_process(lock, aWriter, aSinceTime,
+                                               preRecordedMetaInformation,
+                                               aIsShuttingDown, aService);
+  return true;
+}
+
+// END saving/streaming code
+////////////////////////////////////////////////////////////////////////
+
+static char FeatureCategory(uint32_t aFeature) {
+  if (aFeature & DefaultFeatures()) {
+    if (aFeature & AvailableFeatures()) {
+      return 'D';
+    }
+    return 'd';
+  }
+
+  if (aFeature & StartupExtraDefaultFeatures()) {
+    if (aFeature & AvailableFeatures()) {
+      return 'S';
+    }
+    return 's';
+  }
+
+  if (aFeature & AvailableFeatures()) {
+    return '-';
+  }
+  return 'x';
+}
+
+// Doesn't exit if aExitCode is 0.
+static void PrintUsageThenExit(int aExitCode) {
+  MOZ_RELEASE_ASSERT(NS_IsMainThread());
+
+  printf(
+      "\n"
+      "Profiler environment variable usage:\n"
+      "\n"
+      "  MOZ_PROFILER_HELP\n"
+      "  If set to any value, prints this message.\n"
+      "  Use MOZ_BASE_PROFILER_HELP for BaseProfiler help.\n"
+      "\n"
+      "  MOZ_LOG\n"
+      "  Enables logging. The levels of logging available are\n"
+      "  'prof:3' (least verbose), 'prof:4', 'prof:5' (most verbose).\n"
+      "\n"
+      "  MOZ_PROFILER_STARTUP\n"
+      "  If set to any value other than '' or '0'/'N'/'n', starts the\n"
+      "  profiler immediately on start-up.\n"
+      "  Useful if you want to profile code that runs very early.\n"
+      "\n"
+      "  MOZ_PROFILER_STARTUP_ENTRIES=<%u..%u>\n"
+      "  If MOZ_PROFILER_STARTUP is set, specifies the number of entries per\n"
+      "  process in the profiler's circular buffer when the profiler is first\n"
+      "  started.\n"
+      "  If unset, the platform default is used:\n"
+      "  %u entries per process, or %u when MOZ_PROFILER_STARTUP is set.\n"
+      "  (%u bytes per entry -> %u or %u total bytes per process)\n"
+      "\n"
+      "  MOZ_PROFILER_STARTUP_DURATION=<1..>\n"
+      "  If MOZ_PROFILER_STARTUP is set, specifies the maximum lifetime of\n"
+      "  entries in the profiler's circular buffer when the profiler is\n"
+      "  first started, in seconds.\n"
+      "  If unset, the lifetime of the entries will only be restricted by\n"
+      "  MOZ_PROFILER_STARTUP_ENTRIES (or its default value), and no\n"
+      "  additional time duration restriction will be applied.\n"
+      "\n"
+      "  MOZ_PROFILER_STARTUP_INTERVAL=<1..%d>\n"
+      "  If MOZ_PROFILER_STARTUP is set, specifies the sample interval,\n"
+      "  measured in milliseconds, when the profiler is first started.\n"
+      "  If unset, the platform default is used.\n"
+      "\n"
+      "  MOZ_PROFILER_STARTUP_FEATURES_BITFIELD=<Number>\n"
+      "  If MOZ_PROFILER_STARTUP is set, specifies the profiling features, as\n"
+      "  the integer value of the features bitfield.\n"
+      "  If unset, the value from MOZ_PROFILER_STARTUP_FEATURES is used.\n"
+      "\n"
+      "  MOZ_PROFILER_STARTUP_FEATURES=<Features>\n"
+      "  If MOZ_PROFILER_STARTUP is set, specifies the profiling features, as\n"
+      "  a comma-separated list of strings.\n"
+      "  Ignored if MOZ_PROFILER_STARTUP_FEATURES_BITFIELD is set.\n"
+      "  If unset, the platform default is used.\n"
+      "\n"
+      "  Features: (x=unavailable, 
D/d=default/unavailable,\n" + " S/s=MOZ_PROFILER_STARTUP extra default/unavailable)\n", + unsigned(ActivePS::scMinimumBufferEntries), + unsigned(ActivePS::scMaximumBufferEntries), + unsigned(PROFILER_DEFAULT_ENTRIES.Value()), + unsigned(PROFILER_DEFAULT_STARTUP_ENTRIES.Value()), + unsigned(scBytesPerEntry), + unsigned(PROFILER_DEFAULT_ENTRIES.Value() * scBytesPerEntry), + unsigned(PROFILER_DEFAULT_STARTUP_ENTRIES.Value() * scBytesPerEntry), + PROFILER_MAX_INTERVAL); + +#define PRINT_FEATURE(n_, str_, Name_, desc_) \ + printf(" %c %7u: \"%s\" (%s)\n", FeatureCategory(ProfilerFeature::Name_), \ + ProfilerFeature::Name_, str_, desc_); + + PROFILER_FOR_EACH_FEATURE(PRINT_FEATURE) + +#undef PRINT_FEATURE + + printf( + " - \"default\" (All above D+S defaults)\n" + "\n" + " MOZ_PROFILER_STARTUP_FILTERS=<Filters>\n" + " If MOZ_PROFILER_STARTUP is set, specifies the thread filters, as a\n" + " comma-separated list of strings. A given thread will be sampled if\n" + " any of the filters is a case-insensitive substring of the thread\n" + " name. If unset, a default is used.\n" + "\n" + " MOZ_PROFILER_SHUTDOWN\n" + " If set, the profiler saves a profile to the named file on shutdown.\n" + "\n" + " MOZ_PROFILER_SYMBOLICATE\n" + " If set, the profiler will pre-symbolicate profiles.\n" + " *Note* This will add a significant pause when gathering data, and\n" + " is intended mainly for local development.\n" + "\n" + " MOZ_PROFILER_LUL_TEST\n" + " If set to any value, runs LUL unit tests at startup.\n" + "\n" + " This platform %s native unwinding.\n" + "\n", +#if defined(HAVE_NATIVE_UNWIND) + "supports" +#else + "does not support" +#endif + ); + + if (aExitCode != 0) { + exit(aExitCode); + } +} + +//////////////////////////////////////////////////////////////////////// +// BEGIN Sampler + +#if defined(GP_OS_linux) || defined(GP_OS_android) +struct SigHandlerCoordinator; +#endif + +// Sampler performs setup and teardown of the state required to sample with the +// profiler. Sampler may exist when ActivePS is not present. +// +// SuspendAndSampleAndResumeThread must only be called from a single thread, +// and must not sample the thread it is being called from. A separate Sampler +// instance must be used for each thread which wants to capture samples. + +// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING +// +// With the exception of SamplerThread, all Sampler objects must be Disable-d +// before releasing the lock which was used to create them. This avoids races +// on linux with the SIGPROF signal handler. + +class Sampler { + public: + // Sets up the profiler such that it can begin sampling. + explicit Sampler(PSLockRef aLock); + + // Disable the sampler, restoring it to its previous state. This must be + // called once, and only once, before the Sampler is destroyed. + void Disable(PSLockRef aLock); + + // This method suspends and resumes the samplee thread. It calls the passed-in + // function-like object aProcessRegs (passing it a populated |const + // Registers&| arg) while the samplee thread is suspended. Note that + // the aProcessRegs function must be very careful not to do anything that + // requires a lock, since we may have interrupted the thread at any point. + // As an example, you can't call TimeStamp::Now() since on windows it + // takes a lock on the performance counter. + // + // Func must be a function-like object of type `void()`. 
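+  //
+  // For example, the periodic sampler invokes it along these lines (see the
+  // call site in SamplerThread::Run() below):
+  //
+  //   mSampler.SuspendAndSampleAndResumeThread(
+  //       lock, *registeredThread, now,
+  //       [&](const Registers& aRegs, const TimeStamp& aNow) {
+  //         // Runs while the samplee is suspended: only copy registers and
+  //         // stack memory here; take no locks and allocate nothing.
+  //         DoPeriodicSample(lock, *registeredThread, aRegs, samplePos,
+  //                          localProfileBuffer);
+  //       });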
+  template <typename Func>
+  void SuspendAndSampleAndResumeThread(
+      PSLockRef aLock, const RegisteredThread& aRegisteredThread,
+      const TimeStamp& aNow, const Func& aProcessRegs);
+
+ private:
+#if defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd)
+  // Used to restore the SIGPROF handler when ours is removed.
+  struct sigaction mOldSigprofHandler;
+
+  // This process' ID. Needed as an argument for tgkill in
+  // SuspendAndSampleAndResumeThread.
+  int mMyPid;
+
+  // The sampler thread's ID. Used to assert that it is not sampling itself,
+  // which would lead to deadlock.
+  int mSamplerTid;
+
+ public:
+  // This is the one-and-only variable used to communicate between the sampler
+  // thread and the samplee thread's signal handler. It's static because the
+  // samplee thread's signal handler is static.
+  static struct SigHandlerCoordinator* sSigHandlerCoordinator;
+#endif
+};
+
+// END Sampler
+////////////////////////////////////////////////////////////////////////
+
+// Platform-specific function that retrieves per-thread CPU measurements.
+static RunningTimes GetThreadRunningTimesDiff(
+    PSLockRef aLock, const RegisteredThread& aRegisteredThread);
+
+// Template function to be used by `GetThreadRunningTimesDiff()` (unless some
+// platform has a better way to achieve this).
+// It helps perform CPU measurements and tie them to a timestamp, such that
+// the measurements and the timestamp are very close together.
+// This is necessary because the relative CPU usage is computed by dividing
+// consecutive CPU measurements by their timestamp difference; if there was an
+// unexpected big gap, it could skew this computation and produce impossible
+// spikes that would hide the rest of the data. See bug 1685938 for more info.
+// Note that this may call the measurement function more than once; it is
+// assumed to normally be fast.
+// This was verified experimentally, but there is currently no regression
+// testing for it; see follow-up bug 1687402.
+template <typename GetCPURunningTimesFunction>
+RunningTimes GetRunningTimesWithTightTimestamp(
+    GetCPURunningTimesFunction&& aGetCPURunningTimesFunction) {
+  // Once per process, compute a threshold over which running times and their
+  // timestamp are considered too far apart.
+  static const TimeDuration scMaxRunningTimesReadDuration = [&]() {
+    // Run the main CPU measurements + timestamp a number of times and capture
+    // their durations.
+    constexpr int loops = 128;
+    TimeDuration durations[loops];
+    RunningTimes runningTimes;
+    TimeStamp before = TimeStamp::NowUnfuzzed();
+    for (int i = 0; i < loops; ++i) {
+      AUTO_PROFILER_STATS(GetRunningTimes_MaxRunningTimesReadDuration);
+      aGetCPURunningTimesFunction(runningTimes);
+      const TimeStamp after = TimeStamp::NowUnfuzzed();
+      durations[i] = after - before;
+      before = after;
+    }
+    // Move the median duration to the middle.
+    std::nth_element(&durations[0], &durations[loops / 2], &durations[loops]);
+    // Use median*8 as the cut-off point.
+    // Typical durations should be around a microsecond, so the cut-off should
+    // be around 10 microseconds, well below the expected minimum inter-sample
+    // interval (observed as a few milliseconds), so overall this should keep
+    // cpu/interval spikes to a minimum.
+    return durations[loops / 2] * 8;
+  }();
+
+  // Record CPU measurements between two timestamps.
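+  //
+  // Callers rely on the timestamp recorded below being tight against the
+  // measurement; e.g. the sampler loop later uses it as the sample time,
+  // roughly along these lines (simplified from SamplerThread::Run()):
+  //
+  //   const RunningTimes diff = GetThreadRunningTimesDiff(lock, *thread);
+  //   const TimeStamp& now = diff.PostMeasurementTimeStamp();
+  //   double deltaMs = (now - CorePS::ProcessStartTime()).ToMilliseconds();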
+ RunningTimes runningTimes; + TimeStamp before = TimeStamp::NowUnfuzzed(); + aGetCPURunningTimesFunction(runningTimes); + TimeStamp after = TimeStamp::NowUnfuzzed(); + // In most cases, the above should be quick enough. But if not, repeat: + while (MOZ_UNLIKELY(after - before > scMaxRunningTimesReadDuration)) { + AUTO_PROFILER_STATS(GetRunningTimes_REDO); + before = after; + aGetCPURunningTimesFunction(runningTimes); + after = TimeStamp::NowUnfuzzed(); + } + // Finally, record the closest timestamp just after the final measurement was + // done. This must stay *after* the CPU measurements. + runningTimes.SetPostMeasurementTimeStamp(after); + + return runningTimes; +} + +//////////////////////////////////////////////////////////////////////// +// BEGIN SamplerThread + +// The sampler thread controls sampling and runs whenever the profiler is +// active. It periodically runs through all registered threads, finds those +// that should be sampled, then pauses and samples them. + +class SamplerThread { + public: + // Creates a sampler thread, but doesn't start it. + SamplerThread(PSLockRef aLock, uint32_t aActivityGeneration, + double aIntervalMilliseconds); + ~SamplerThread(); + + // This runs on (is!) the sampler thread. + void Run(); + + // This runs on the main thread. + void Stop(PSLockRef aLock); + + void AppendPostSamplingCallback(PSLockRef, PostSamplingCallback&& aCallback) { + // We are under lock, so it's safe to just modify the list pointer. + // Also this means the sampler has not started its run yet, so any callback + // added now will be invoked at the end of the next loop; this guarantees + // that the callback will be invoked after at least one full sampling loop. + mPostSamplingCallbackList = MakeUnique<PostSamplingCallbackListItem>( + std::move(mPostSamplingCallbackList), std::move(aCallback)); + } + + private: + // Item containing a post-sampling callback, and a tail-list of more items. + // Using a linked list means no need to move items when adding more, and + // "stealing" the whole list is one pointer move. + struct PostSamplingCallbackListItem { + UniquePtr<PostSamplingCallbackListItem> mPrev; + PostSamplingCallback mCallback; + + PostSamplingCallbackListItem(UniquePtr<PostSamplingCallbackListItem> aPrev, + PostSamplingCallback&& aCallback) + : mPrev(std::move(aPrev)), mCallback(std::move(aCallback)) {} + }; + + [[nodiscard]] UniquePtr<PostSamplingCallbackListItem> + TakePostSamplingCallbacks(PSLockRef) { + return std::move(mPostSamplingCallbackList); + } + + static void InvokePostSamplingCallbacks( + UniquePtr<PostSamplingCallbackListItem> aCallbacks, + SamplingState aSamplingState) { + if (!aCallbacks) { + return; + } + // We want to drill down to the last element in this list, which is the + // oldest one, so that we invoke them in FIFO order. + // We don't expect many callbacks, so it's safe to recurse. Note that we're + // moving-from the UniquePtr, so the tail will implicitly get destroyed. + InvokePostSamplingCallbacks(std::move(aCallbacks->mPrev), aSamplingState); + // We are going to destroy this item, so we can safely move-from the + // callback before calling it (in case it has an rvalue-ref-qualified call + // operator). + std::move(aCallbacks->mCallback)(aSamplingState); + // It may be tempting for a future maintainer to change aCallbacks into an + // rvalue reference; this will remind them not to do that! 
+ static_assert( + std::is_same_v<decltype(aCallbacks), + UniquePtr<PostSamplingCallbackListItem>>, + "We need to capture the list by-value, to implicitly destroy it"); + } + + // This suspends the calling thread for the given number of microseconds. + // Best effort timing. + void SleepMicro(uint32_t aMicroseconds); + + // The sampler used to suspend and sample threads. + Sampler mSampler; + + // The activity generation, for detecting when the sampler thread must stop. + const uint32_t mActivityGeneration; + + // The interval between samples, measured in microseconds. + const int mIntervalMicroseconds; + + // The OS-specific handle for the sampler thread. +#if defined(GP_OS_windows) + HANDLE mThread; +#elif defined(GP_OS_darwin) || defined(GP_OS_linux) || \ + defined(GP_OS_android) || defined(GP_OS_freebsd) + pthread_t mThread; +#endif + + // Post-sampling callbacks are kept in a simple linked list, which will be + // stolen by the sampler thread at the end of its next run. + UniquePtr<PostSamplingCallbackListItem> mPostSamplingCallbackList; + + SamplerThread(const SamplerThread&) = delete; + void operator=(const SamplerThread&) = delete; +}; + +// [[nodiscard]] static +bool ActivePS::AppendPostSamplingCallback(PSLockRef aLock, + PostSamplingCallback&& aCallback) { + if (!sInstance || !sInstance->mSamplerThread) { + return false; + } + sInstance->mSamplerThread->AppendPostSamplingCallback(aLock, + std::move(aCallback)); + return true; +} + +// This function is required because we need to create a SamplerThread within +// ActivePS's constructor, but SamplerThread is defined after ActivePS. It +// could probably be removed by moving some code around. +static SamplerThread* NewSamplerThread(PSLockRef aLock, uint32_t aGeneration, + double aInterval) { + return new SamplerThread(aLock, aGeneration, aInterval); +} + +// This function is the sampler thread. This implementation is used for all +// targets. +void SamplerThread::Run() { + PR_SetCurrentThreadName("SamplerThread"); + + // Features won't change during this SamplerThread's lifetime, so we can read + // them once and store them locally. + const uint32_t features = []() -> uint32_t { + PSAutoLock lock(gPSMutex); + if (!ActivePS::Exists(lock)) { + // If there is no active profiler, it doesn't matter what we return, + // because this thread will exit before any feature is used. + return 0; + } + return ActivePS::Features(lock); + }(); + + // Not *no*-stack-sampling means we do want stack sampling. + const bool stackSampling = !ProfilerFeature::HasNoStackSampling(features); + + const bool cpuUtilization = ProfilerFeature::HasCPUUtilization(features); + + // Use local ProfileBuffer and underlying buffer to capture the stack. + // (This is to avoid touching the CorePS::CoreBuffer lock while a thread is + // suspended, because that thread could be working with the CorePS::CoreBuffer + // as well.) + mozilla::ProfileBufferChunkManagerSingle localChunkManager( + ProfileBufferChunkManager::scExpectedMaximumStackSize); + ProfileChunkedBuffer localBuffer( + ProfileChunkedBuffer::ThreadSafety::WithoutMutex, localChunkManager); + ProfileBuffer localProfileBuffer(localBuffer); + + // Will be kept between collections, to know what each collection does. + auto previousState = localBuffer.GetState(); + + // This will be set inside the loop, from inside the lock scope, to capture + // all callbacks added before that, but none after the lock is released. 
+ UniquePtr<PostSamplingCallbackListItem> postSamplingCallbacks; + // This will be set inside the loop, before invoking callbacks outside. + SamplingState samplingState{}; + + const TimeDuration sampleInterval = + TimeDuration::FromMicroseconds(mIntervalMicroseconds); + const uint32_t minimumIntervalSleepUs = + static_cast<uint32_t>(mIntervalMicroseconds / 4); + + // This is the scheduled time at which each sampling loop should start. + // It will determine the ideal next sampling start by adding the expected + // interval, unless when sampling runs late -- See end of while() loop. + TimeStamp scheduledSampleStart = TimeStamp::NowUnfuzzed(); + + while (true) { + const TimeStamp sampleStart = TimeStamp::NowUnfuzzed(); + + // This scope is for |lock|. It ends before we sleep below. + { + // There should be no local callbacks left from a previous loop. + MOZ_ASSERT(!postSamplingCallbacks); + + PSAutoLock lock(gPSMutex); + TimeStamp lockAcquired = TimeStamp::NowUnfuzzed(); + + // Move all the post-sampling callbacks locally, so that new ones cannot + // sneak in between the end of the lock scope and the invocation after it. + postSamplingCallbacks = TakePostSamplingCallbacks(lock); + + if (!ActivePS::Exists(lock)) { + // Exit the `while` loop, including the lock scope, before invoking + // callbacks and returning. + samplingState = SamplingState::JustStopped; + break; + } + + // At this point profiler_stop() might have been called, and + // profiler_start() might have been called on another thread. If this + // happens the generation won't match. + if (ActivePS::Generation(lock) != mActivityGeneration) { + samplingState = SamplingState::JustStopped; + // Exit the `while` loop, including the lock scope, before invoking + // callbacks and returning. + break; + } + + ActivePS::ClearExpiredExitProfiles(lock); + + TimeStamp expiredMarkersCleaned = TimeStamp::NowUnfuzzed(); + + if (!ActivePS::IsSamplingPaused(lock)) { + double sampleStartDeltaMs = + (sampleStart - CorePS::ProcessStartTime()).ToMilliseconds(); + ProfileBuffer& buffer = ActivePS::Buffer(lock); + + // handle per-process generic counters + const Vector<BaseProfilerCount*>& counters = CorePS::Counters(lock); + for (auto& counter : counters) { + // create Buffer entries for each counter + buffer.AddEntry(ProfileBufferEntry::CounterId(counter)); + buffer.AddEntry(ProfileBufferEntry::Time(sampleStartDeltaMs)); + // XXX support keyed maps of counts + // In the future, we'll support keyed counters - for example, counters + // with a key which is a thread ID. For "simple" counters we'll just + // use a key of 0. + int64_t count; + uint64_t number; + counter->Sample(count, number); + buffer.AddEntry(ProfileBufferEntry::CounterKey(0)); + buffer.AddEntry(ProfileBufferEntry::Count(count)); + if (number) { + buffer.AddEntry(ProfileBufferEntry::Number(number)); + } + } + TimeStamp countersSampled = TimeStamp::NowUnfuzzed(); + + if (stackSampling || cpuUtilization) { + samplingState = SamplingState::SamplingCompleted; + + const Vector<LiveProfiledThreadData>& liveThreads = + ActivePS::LiveProfiledThreads(lock); + + for (auto& thread : liveThreads) { + RegisteredThread* registeredThread = thread.mRegisteredThread; + ProfiledThreadData* profiledThreadData = + thread.mProfiledThreadData.get(); + RefPtr<ThreadInfo> info = registeredThread->Info(); + + const RunningTimes runningTimesDiff = [&]() { + if (!cpuUtilization) { + // If we don't need CPU measurements, we only need a timestamp. 
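+                  // A timestamp-only RunningTimes still provides the
+                  // PostMeasurementTimeStamp() used as the sample time below,
+                  // and the IsEmpty() check further down keeps it out of the
+                  // buffer.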
+ return RunningTimes(TimeStamp::NowUnfuzzed()); + } + return GetThreadRunningTimesDiff(lock, *registeredThread); + }(); + + const TimeStamp& now = runningTimesDiff.PostMeasurementTimeStamp(); + double threadSampleDeltaMs = + (now - CorePS::ProcessStartTime()).ToMilliseconds(); + + // If the thread is asleep and has been sampled before in the same + // sleep episode, find and copy the previous sample, as that's + // cheaper than taking a new sample. + // However we're using current running times (instead of copying the + // old ones) because some work could have happened. + if (registeredThread->RacyRegisteredThread() + .CanDuplicateLastSampleDueToSleep()) { + const bool dup_ok = ActivePS::Buffer(lock).DuplicateLastSample( + info->ThreadId(), threadSampleDeltaMs, + profiledThreadData->LastSample(), runningTimesDiff); + if (dup_ok) { + continue; + } + } + + AUTO_PROFILER_STATS(gecko_SamplerThread_Run_DoPeriodicSample); + + // Add the thread ID now, so we know its position in the main + // buffer, which is used by some JS data. + // (DoPeriodicSample only knows about the temporary local buffer.) + uint64_t samplePos = + buffer.AddThreadIdEntry(registeredThread->Info()->ThreadId()); + profiledThreadData->LastSample() = Some(samplePos); + + // Also add the time, so it's always there after the thread ID, as + // expected by the parser. (Other stack data is optional.) + buffer.AddEntry(ProfileBufferEntry::TimeBeforeCompactStack( + threadSampleDeltaMs)); + + Maybe<double> unresponsiveDuration_ms; + + // If we have RunningTimes data, store it before the CompactStack. + // Note: It is not stored inside the CompactStack so that it doesn't + // get incorrectly duplicated when the thread is sleeping. + if (!runningTimesDiff.IsEmpty()) { + CorePS::CoreBuffer().PutObjects( + ProfileBufferEntry::Kind::RunningTimes, runningTimesDiff); + } + + if (stackSampling) { + // Suspend the thread and collect its stack data in the local + // buffer. + mSampler.SuspendAndSampleAndResumeThread( + lock, *registeredThread, now, + [&](const Registers& aRegs, const TimeStamp& aNow) { + DoPeriodicSample(lock, *registeredThread, aRegs, samplePos, + localProfileBuffer); + + // For "eventDelay", we want the input delay - but if + // there are no events in the input queue (or even if there + // are), we're interested in how long the delay *would* be + // for an input event now, which would be the time to finish + // the current event + the delay caused by any events + // already in the input queue (plus any High priority + // events). Events at lower priorities (in a + // PrioritizedEventQueue) than Input count for input delay + // only for the duration that they're running, since when + // they finish, any queued input event would run. + // + // Unless we record the time state of all events and queue + // states at all times, this is hard to precisely calculate, + // but we can approximate it well in post-processing with + // RunningEventDelay and RunningEventStart. + // + // RunningEventDelay is the time duration the event was + // queued before starting execution. RunningEventStart is + // the time the event started. (Note: since we care about + // Input event delays on MainThread, for + // PrioritizedEventQueues we return 0 for RunningEventDelay + // if the currently running event has a lower priority than + // Input (since Input events won't queue behind them). 
+ // + // To directly measure this we would need to record the time + // at which the newest event currently in each queue at time + // X (the sample time) finishes running. This of course + // would require looking into the future, or recording all + // this state and then post-processing it later. If we were + // to trace every event start and end we could do this, but + // it would have significant overhead to do so (and buffer + // usage). From a recording of RunningEventDelays and + // RunningEventStarts we can infer the actual delay: + // + // clang-format off + // Event queue: <tail> D : C : B : A <head> + // Time inserted (ms): 40 : 20 : 10 : 0 + // Run Time (ms): 30 : 100 : 40 : 30 + // + // 0 10 20 30 40 50 60 70 80 90 100 110 120 130 140 150 160 170 + // [A||||||||||||] + // ----------[B|||||||||||||||||] + // -------------------------[C|||||||||||||||||||||||||||||||||||||||||||||||] + // -----------------------------------------------------------------[D|||||||||...] + // + // Calculate the delay of a new event added at time t: (run every sample) + // TimeSinceRunningEventBlockedInputEvents = RunningEventDelay + (now - RunningEventStart); + // effective_submission = now - TimeSinceRunningEventBlockedInputEvents; + // delta = (now - last_sample_time); + // last_sample_time = now; + // for (t=effective_submission to now) { + // delay[t] += delta; + // } + // + // Can be reduced in overhead by: + // TimeSinceRunningEventBlockedInputEvents = RunningEventDelay + (now - RunningEventStart); + // effective_submission = now - TimeSinceRunningEventBlockedInputEvents; + // if (effective_submission != last_submission) { + // delta = (now - last_submision); + // // this loop should be made to match each sample point in the range + // // intead of assuming 1ms sampling as this pseudocode does + // for (t=last_submission to effective_submission-1) { + // delay[t] += delta; + // delta -= 1; // assumes 1ms; adjust as needed to match for() + // } + // last_submission = effective_submission; + // } + // + // Time Head of queue Running Event RunningEventDelay Delay of Effective Started Calc (submission->now add 10ms) Final + // hypothetical Submission Running @ result + // event E + // 0 Empty A 0 30 0 0 @0=10 30 + // 10 B A 0 60 0 0 @0=20, @10=10 60 + // 20 B A 0 150 0 0 @0=30, @10=20, @20=10 150 + // 30 C B 20 140 10 30 @10=20, @20=10, @30=0 140 + // 40 C B 20 160 @10=30, @20=20... 160 + // 50 C B 20 150 150 + // 60 C B 20 140 @10=50, @20=40... 140 + // 70 D C 50 130 20 70 @20=50, @30=40... 130 + // ... + // 160 D C 50 40 @20=140, @30=130... 40 + // 170 <empty> D 140 30 40 @40=140, @50=130... (rounding) 30 + // 180 <empty> D 140 20 40 @40=150 20 + // 190 <empty> D 140 10 40 @40=160 10 + // 200 <empty> <empty> 0 0 NA 0 + // + // Function Delay(t) = the time between t and the time at which a hypothetical + // event e would start executing, if e was enqueued at time t. + // + // Delay(-1) = 0 // Before A was enqueued. No wait time, can start running + // // instantly. + // Delay(0) = 30 // The hypothetical event e got enqueued just after A got + // // enqueued. It can start running at 30, when A is done. + // Delay(5) = 25 + // Delay(10) = 60 // Can start running at 70, after both A and B are done. + // Delay(19) = 51 + // Delay(20) = 150 // Can start running at 170, after A, B & C. + // Delay(25) = 145 + // Delay(30) = 170 // Can start running at 200, after A, B, C & D. 
+ // Delay(120) = 80 + // Delay(200) = 0 // (assuming nothing was enqueued after D) + // + // For every event that gets enqueued, the Delay time will go up by the + // event's running time at the time at which the event is enqueued. + // The Delay function will be a sawtooth of the following shape: + // + // |\ |... + // | \ | + // |\ | \ | + // | \ | \ | + // |\ | \ | \ | + // |\ | \| \| \ | + // | \| \ | + // _| \____| + // + // + // A more complex example with a PrioritizedEventQueue: + // + // Event queue: <tail> D : C : B : A <head> + // Time inserted (ms): 40 : 20 : 10 : 0 + // Run Time (ms): 30 : 100 : 40 : 30 + // Priority: Input: Norm: Norm: Norm + // + // 0 10 20 30 40 50 60 70 80 90 100 110 120 130 140 150 160 170 + // [A||||||||||||] + // ----------[B|||||||||||||||||] + // ----------------------------------------[C|||||||||||||||||||||||||||||||||||||||||||||||] + // ---------------[D||||||||||||] + // + // + // Time Head of queue Running Event RunningEventDelay Delay of Effective Started Calc (submission->now add 10ms) Final + // hypothetical Submission Running @ result + // event + // 0 Empty A 0 30 0 0 @0=10 30 + // 10 B A 0 20 0 0 @0=20, @10=10 20 + // 20 B A 0 10 0 0 @0=30, @10=20, @20=10 10 + // 30 C B 0 40 30 30 @30=10 40 + // 40 C B 0 60 30 @40=10, @30=20 60 + // 50 C B 0 50 30 @50=10, @40=20, @30=30 50 + // 60 C B 0 40 30 @60=10, @50=20, @40=30, @30=40 40 + // 70 C D 30 30 40 70 @60=20, @50=30, @40=40 30 + // 80 C D 30 20 40 70 ...@50=40, @40=50 20 + // 90 C D 30 10 40 70 ...@60=40, @50=50, @40=60 10 + // 100 <empty> C 0 100 100 100 @100=10 100 + // 110 <empty> C 0 90 100 100 @110=10, @100=20 90 + + // + // For PrioritizedEventQueue, the definition of the Delay(t) function is adjusted: the hypothetical event e has Input priority. + // Delay(-1) = 0 // Before A was enqueued. No wait time, can start running + // // instantly. + // Delay(0) = 30 // The hypothetical input event e got enqueued just after A got + // // enqueued. It can start running at 30, when A is done. + // Delay(5) = 25 + // Delay(10) = 20 + // Delay(25) = 5 // B has been queued, but e does not need to wait for B because e has Input priority and B does not. + // // So e can start running at 30, when A is done. + // Delay(30) = 40 // Can start running at 70, after B is done. + // Delay(40) = 60 // Can start at 100, after B and D are done (D is Input Priority) + // Delay(80) = 20 + // Delay(100) = 100 // Wait for C to finish + + // clang-format on + // + // Alternatively we could insert (recycled instead of + // allocated/freed) input events at every sample period + // (1ms...), and use them to back-calculate the delay. This + // might also be somewhat expensive, and would require + // guessing at the maximum delay, which would likely be in + // the seconds, and so you'd need 1000's of pre-allocated + // events per queue per thread - so there would be a memory + // impact as well. + + TimeDuration currentEventDelay; + TimeDuration currentEventRunning; + registeredThread->GetRunningEventDelay( + aNow, currentEventDelay, currentEventRunning); + + // Note: eventDelay is a different definition of + // responsiveness than the 16ms event injection. + + // Don't suppress 0's for now; that can be a future + // optimization. We probably want one zero to be stored + // before we start suppressing, which would be more + // complex. + unresponsiveDuration_ms = + Some(currentEventDelay.ToMilliseconds() + + currentEventRunning.ToMilliseconds()); + }); + + // If we got eventDelay data, store it before the CompactStack. 
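+            // (unresponsiveDuration_ms is only set if the sampling callback
+            // above actually ran, hence the isSome() check below.)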
+ // Note: It is not stored inside the CompactStack so that it + // doesn't get incorrectly duplicated when the thread is sleeping. + if (unresponsiveDuration_ms.isSome()) { + CorePS::CoreBuffer().PutObjects( + ProfileBufferEntry::Kind::UnresponsiveDurationMs, + *unresponsiveDuration_ms); + } + } + + // There *must* be a CompactStack after a TimeBeforeCompactStack; + // but note that other entries may have been concurrently inserted + // between the TimeBeforeCompactStack above and now. If the captured + // sample from `DoPeriodicSample` is complete, copy it into the + // global buffer, otherwise add an empty one to satisfy the parser + // that expects one. + auto state = localBuffer.GetState(); + if (NS_WARN_IF(state.mFailedPutBytes != + previousState.mFailedPutBytes)) { + LOG("Stack sample too big for local storage, failed to store %u " + "bytes", + unsigned(state.mFailedPutBytes - + previousState.mFailedPutBytes)); + // There *must* be a CompactStack after a TimeBeforeCompactStack, + // even an empty one. + CorePS::CoreBuffer().PutObjects( + ProfileBufferEntry::Kind::CompactStack, + UniquePtr<ProfileChunkedBuffer>(nullptr)); + } else if (state.mRangeEnd - previousState.mRangeEnd >= + *CorePS::CoreBuffer().BufferLength()) { + LOG("Stack sample too big for profiler storage, needed %u bytes", + unsigned(state.mRangeEnd - previousState.mRangeEnd)); + // There *must* be a CompactStack after a TimeBeforeCompactStack, + // even an empty one. + CorePS::CoreBuffer().PutObjects( + ProfileBufferEntry::Kind::CompactStack, + UniquePtr<ProfileChunkedBuffer>(nullptr)); + } else { + CorePS::CoreBuffer().PutObjects( + ProfileBufferEntry::Kind::CompactStack, localBuffer); + } + + // Clean up for the next run. + localBuffer.Clear(); + previousState = localBuffer.GetState(); + } + } else { + samplingState = SamplingState::NoStackSamplingCompleted; + } + +#if defined(USE_LUL_STACKWALK) + // The LUL unwind object accumulates frame statistics. Periodically we + // should poke it to give it a chance to print those statistics. This + // involves doing I/O (fprintf, __android_log_print, etc.) and so + // can't safely be done from the critical section inside + // SuspendAndSampleAndResumeThread, which is why it is done here. + CorePS::Lul(lock)->MaybeShowStats(); +#endif + TimeStamp threadsSampled = TimeStamp::NowUnfuzzed(); + + { + AUTO_PROFILER_STATS(Sampler_FulfillChunkRequests); + ActivePS::FulfillChunkRequests(lock); + } + + buffer.CollectOverheadStats(sampleStartDeltaMs, + lockAcquired - sampleStart, + expiredMarkersCleaned - lockAcquired, + countersSampled - expiredMarkersCleaned, + threadsSampled - countersSampled); + } else { + samplingState = SamplingState::SamplingPaused; + } + } + // gPSMutex is not held after this point. + + // Invoke end-of-sampling callbacks outside of the locked scope. + InvokePostSamplingCallbacks(std::move(postSamplingCallbacks), + samplingState); + + ProfilerChild::ProcessPendingUpdate(); + + // We expect the next sampling loop to start `sampleInterval` after this + // loop here was scheduled to start. + scheduledSampleStart += sampleInterval; + + // Try to sleep until we reach that next scheduled time. + const TimeStamp beforeSleep = TimeStamp::NowUnfuzzed(); + if (scheduledSampleStart >= beforeSleep) { + // There is still time before the next scheduled sample time. 
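+      // E.g., with a 1ms interval: if this loop finished 0.7ms early we sleep
+      // ~0.7ms; if it finished less than 0.25ms (interval/4) early we still
+      // sleep the 0.25ms minimum; the schedule is not shifted, so later
+      // iterations sleep a little less and catch back up.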
+ const uint32_t sleepTimeUs = static_cast<uint32_t>( + (scheduledSampleStart - beforeSleep).ToMicroseconds()); + if (sleepTimeUs >= minimumIntervalSleepUs) { + SleepMicro(sleepTimeUs); + } else { + // If we're too close to that time, sleep the minimum amount of time. + // Note that the next scheduled start is not shifted, so at the end of + // the next loop, sleep may again be adjusted to get closer to schedule. + SleepMicro(minimumIntervalSleepUs); + } + } else { + // This sampling loop ended after the next sampling should have started! + // There is little point to try and keep up to schedule now, it would + // require more work, while it's likely we're late because the system is + // already busy. Try and restart a normal schedule from now. + scheduledSampleStart = beforeSleep + sampleInterval; + SleepMicro(static_cast<uint32_t>(sampleInterval.ToMicroseconds())); + } + } + + // End of `while` loop. We can only be here from a `break` inside the loop. + InvokePostSamplingCallbacks(std::move(postSamplingCallbacks), samplingState); +} + +// We #include these files directly because it means those files can use +// declarations from this file trivially. These provide target-specific +// implementations of all SamplerThread methods except Run(). +#if defined(GP_OS_windows) +# include "platform-win32.cpp" +#elif defined(GP_OS_darwin) +# include "platform-macos.cpp" +#elif defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd) +# include "platform-linux-android.cpp" +#else +# error "bad platform" +#endif + +UniquePlatformData AllocPlatformData(int aThreadId) { + return UniquePlatformData(new PlatformData(aThreadId)); +} + +void PlatformDataDestructor::operator()(PlatformData* aData) { delete aData; } + +// END SamplerThread +//////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////// +// BEGIN externally visible functions + +MOZ_DEFINE_MALLOC_SIZE_OF(GeckoProfilerMallocSizeOf) + +NS_IMETHODIMP +GeckoProfilerReporter::CollectReports(nsIHandleReportCallback* aHandleReport, + nsISupports* aData, bool aAnonymize) { + MOZ_RELEASE_ASSERT(NS_IsMainThread()); + + size_t profSize = 0; + size_t lulSize = 0; + + { + PSAutoLock lock(gPSMutex); + + if (CorePS::Exists()) { + CorePS::AddSizeOf(lock, GeckoProfilerMallocSizeOf, profSize, lulSize); + } + + if (ActivePS::Exists(lock)) { + profSize += ActivePS::SizeOf(lock, GeckoProfilerMallocSizeOf); + } + } + + MOZ_COLLECT_REPORT( + "explicit/profiler/profiler-state", KIND_HEAP, UNITS_BYTES, profSize, + "Memory used by the Gecko Profiler's global state (excluding memory used " + "by LUL)."); + +#if defined(USE_LUL_STACKWALK) + MOZ_COLLECT_REPORT( + "explicit/profiler/lul", KIND_HEAP, UNITS_BYTES, lulSize, + "Memory used by LUL, a stack unwinder used by the Gecko Profiler."); +#endif + + return NS_OK; +} + +NS_IMPL_ISUPPORTS(GeckoProfilerReporter, nsIMemoryReporter) + +static uint32_t ParseFeature(const char* aFeature, bool aIsStartup) { + if (strcmp(aFeature, "default") == 0) { + return (aIsStartup ? 
(DefaultFeatures() | StartupExtraDefaultFeatures())
+                     : DefaultFeatures()) &
+         AvailableFeatures();
+  }
+
+#define PARSE_FEATURE_BIT(n_, str_, Name_, desc_) \
+  if (strcmp(aFeature, str_) == 0) { \
+    return ProfilerFeature::Name_; \
+  }
+
+  PROFILER_FOR_EACH_FEATURE(PARSE_FEATURE_BIT)
+
+#undef PARSE_FEATURE_BIT
+
+  printf("\nUnrecognized feature \"%s\".\n\n", aFeature);
+  // Since we may have an old feature we don't implement anymore, don't exit
+  PrintUsageThenExit(0);
+  return 0;
+}
+
+uint32_t ParseFeaturesFromStringArray(const char** aFeatures,
+                                      uint32_t aFeatureCount,
+                                      bool aIsStartup /* = false */) {
+  uint32_t features = 0;
+  for (size_t i = 0; i < aFeatureCount; i++) {
+    features |= ParseFeature(aFeatures[i], aIsStartup);
+  }
+  return features;
+}
+
+static bool IsRegisteredThreadInRegisteredThreadsList(
+    PSLockRef aLock, RegisteredThread* aThread) {
+  const auto& registeredThreads = CorePS::RegisteredThreads(aLock);
+  for (const auto& registeredThread : registeredThreads) {
+    if (registeredThread.get() == aThread) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+static ProfilingStack* locked_register_thread(PSLockRef aLock,
+                                              const char* aName,
+                                              void* aStackTop) {
+  MOZ_RELEASE_ASSERT(CorePS::Exists());
+
+  VTUNE_REGISTER_THREAD(aName);
+
+  if (!TLSRegisteredThread::IsTLSInited()) {
+    return nullptr;
+  }
+
+  RefPtr<ThreadInfo> info =
+      new ThreadInfo(aName, profiler_current_thread_id(), NS_IsMainThread());
+  UniquePtr<RegisteredThread> registeredThread = MakeUnique<RegisteredThread>(
+      info, NS_GetCurrentThreadNoCreate(), aStackTop);
+
+  TLSRegisteredThread::SetRegisteredThreadAndAutoProfilerLabelProfilingStack(
+      aLock, registeredThread.get());
+
+  if (ActivePS::Exists(aLock) && ActivePS::ShouldProfileThread(aLock, info)) {
+    registeredThread->RacyRegisteredThread().SetIsBeingProfiled(true);
+    nsCOMPtr<nsIEventTarget> eventTarget = registeredThread->GetEventTarget();
+    ProfiledThreadData* profiledThreadData = ActivePS::AddLiveProfiledThread(
+        aLock, registeredThread.get(),
+        MakeUnique<ProfiledThreadData>(info, eventTarget));
+
+    if (ActivePS::FeatureJS(aLock)) {
+      // This StartJSSampling() call is on-thread, so we can poll manually to
+      // start JS sampling immediately.
+      registeredThread->StartJSSampling(ActivePS::JSFlags(aLock));
+      registeredThread->PollJSSampling();
+      if (registeredThread->GetJSContext()) {
+        profiledThreadData->NotifyReceivedJSContext(
+            ActivePS::Buffer(aLock).BufferRangeEnd());
+      }
+    }
+  }
+
+  MOZ_RELEASE_ASSERT(TLSRegisteredThread::RegisteredThread(aLock),
+                     "TLS should be set when registering thread");
+  MOZ_RELEASE_ASSERT(
+      registeredThread == TLSRegisteredThread::RegisteredThread(aLock),
+      "TLS should be set as expected when registering thread");
+
+  ProfilingStack* profilingStack =
+      &registeredThread->RacyRegisteredThread().ProfilingStack();
+
+  CorePS::AppendRegisteredThread(aLock, std::move(registeredThread));
+
+  return profilingStack;
+}
+
+static void NotifyObservers(const char* aTopic,
+                            nsISupports* aSubject = nullptr) {
+  if (!NS_IsMainThread()) {
+    // Dispatch a task to the main thread that notifies observers.
+    // If NotifyObservers is called both on and off the main thread within a
+    // short time, the order of the notifications can be different from the
+    // order of the calls to NotifyObservers.
+    // Getting the order 100% right isn't that important at the moment, because
+    // these notifications are only observed in the parent process, where the
+    // profiler_* functions are currently only called on the main thread.
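+    // (The dispatched runnable below re-enters NotifyObservers on the main
+    // thread, which then takes the observer-service path.)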
+ nsCOMPtr<nsISupports> subject = aSubject; + NS_DispatchToMainThread(NS_NewRunnableFunction( + "NotifyObservers", [=] { NotifyObservers(aTopic, subject); })); + return; + } + + if (nsCOMPtr<nsIObserverService> os = services::GetObserverService()) { + os->NotifyObservers(aSubject, aTopic, nullptr); + } +} + +static void NotifyProfilerStarted(const PowerOfTwo32& aCapacity, + const Maybe<double>& aDuration, + double aInterval, uint32_t aFeatures, + const char** aFilters, uint32_t aFilterCount, + uint64_t aActiveBrowsingContextID) { + nsTArray<nsCString> filtersArray; + for (size_t i = 0; i < aFilterCount; ++i) { + filtersArray.AppendElement(aFilters[i]); + } + + nsCOMPtr<nsIProfilerStartParams> params = new nsProfilerStartParams( + aCapacity.Value(), aDuration, aInterval, aFeatures, + std::move(filtersArray), aActiveBrowsingContextID); + + ProfilerParent::ProfilerStarted(params); + NotifyObservers("profiler-started", params); +} + +static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity, + double aInterval, uint32_t aFeatures, + const char** aFilters, uint32_t aFilterCount, + uint64_t aActiveBrowsingContextID, + const Maybe<double>& aDuration); + +// This basically duplicates AutoProfilerLabel's constructor. +static void* MozGlueLabelEnter(const char* aLabel, const char* aDynamicString, + void* aSp) { + ProfilingStackOwner* profilingStackOwner = + AutoProfilerLabel::ProfilingStackOwnerTLS::Get(); + if (profilingStackOwner) { + profilingStackOwner->ProfilingStack().pushLabelFrame( + aLabel, aDynamicString, aSp, JS::ProfilingCategoryPair::OTHER); + } + return profilingStackOwner; +} + +// This basically duplicates AutoProfilerLabel's destructor. +static void MozGlueLabelExit(void* aProfilingStackOwner) { + if (aProfilingStackOwner) { + reinterpret_cast<ProfilingStackOwner*>(aProfilingStackOwner) + ->ProfilingStack() + .pop(); + } +} + +static Vector<const char*> SplitAtCommas(const char* aString, + UniquePtr<char[]>& aStorage) { + size_t len = strlen(aString); + aStorage = MakeUnique<char[]>(len + 1); + PodCopy(aStorage.get(), aString, len + 1); + + // Iterate over all characters in aStorage and split at commas, by + // overwriting commas with the null char. + Vector<const char*> array; + size_t currentElementStart = 0; + for (size_t i = 0; i <= len; i++) { + if (aStorage[i] == ',') { + aStorage[i] = '\0'; + } + if (aStorage[i] == '\0') { + MOZ_RELEASE_ASSERT(array.append(&aStorage[currentElementStart])); + currentElementStart = i + 1; + } + } + return array; +} + +void profiler_init_threadmanager() { + LOG("profiler_init_threadmanager"); + + PSAutoLock lock(gPSMutex); + RegisteredThread* registeredThread = + TLSRegisteredThread::RegisteredThread(lock); + if (registeredThread && !registeredThread->GetEventTarget()) { + registeredThread->ResetMainThread(NS_GetCurrentThreadNoCreate()); + } +} + +void profiler_init(void* aStackTop) { + LOG("profiler_init"); + + scProfilerMainThreadId = profiler_current_thread_id(); + + VTUNE_INIT(); + + MOZ_RELEASE_ASSERT(!CorePS::Exists()); + + if (getenv("MOZ_PROFILER_HELP")) { + PrintUsageThenExit(1); // terminates execution + } + + // This must be before any TLS access (e.g.: Thread registration, labels...). 
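+  // (The main thread itself is only registered further below, under the lock,
+  // via locked_register_thread().)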
+ TLSRegisteredThread::Init(); + + SharedLibraryInfo::Initialize(); + + uint32_t features = DefaultFeatures() & AvailableFeatures(); + + UniquePtr<char[]> filterStorage; + + Vector<const char*> filters; + MOZ_RELEASE_ASSERT(filters.append("GeckoMain")); + MOZ_RELEASE_ASSERT(filters.append("Compositor")); + MOZ_RELEASE_ASSERT(filters.append("Renderer")); + MOZ_RELEASE_ASSERT(filters.append("DOM Worker")); + + PowerOfTwo32 capacity = PROFILER_DEFAULT_ENTRIES; + Maybe<double> duration = Nothing(); + double interval = PROFILER_DEFAULT_INTERVAL; + + { + PSAutoLock lock(gPSMutex); + + // We've passed the possible failure point. Instantiate CorePS, which + // indicates that the profiler has initialized successfully. + CorePS::Create(lock); + + // profiler_init implicitly registers this thread as main thread. + Unused << locked_register_thread(lock, kMainThreadName, aStackTop); + + // Platform-specific initialization. + PlatformInit(lock); + +#ifdef MOZ_TASK_TRACER + tasktracer::InitTaskTracer(); +#endif + +#if defined(GP_OS_android) + if (jni::IsAvailable()) { + GeckoJavaSampler::Init(); + } +#endif + + // (Linux-only) We could create CorePS::mLul and read unwind info into it + // at this point. That would match the lifetime implied by destruction of + // it in profiler_shutdown() just below. However, that gives a big delay on + // startup, even if no profiling is actually to be done. So, instead, it is + // created on demand at the first call to PlatformStart(). + + const char* startupEnv = getenv("MOZ_PROFILER_STARTUP"); + if (!startupEnv || startupEnv[0] == '\0' || + ((startupEnv[0] == '0' || startupEnv[0] == 'N' || + startupEnv[0] == 'n') && + startupEnv[1] == '\0')) { + return; + } + + LOG("- MOZ_PROFILER_STARTUP is set"); + + // Startup default capacity may be different. + capacity = PROFILER_DEFAULT_STARTUP_ENTRIES; + + const char* startupCapacity = getenv("MOZ_PROFILER_STARTUP_ENTRIES"); + if (startupCapacity && startupCapacity[0] != '\0') { + errno = 0; + long capacityLong = strtol(startupCapacity, nullptr, 10); + // `long` could be 32 or 64 bits, so we force a 64-bit comparison with + // the maximum 32-bit signed number (as more than that is clamped down to + // 2^31 anyway). 
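+      // (Values above INT32_MAX, zero or negative values, and non-numeric
+      // strings all fall through to the error branch below and print the
+      // usage message.)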
+ if (errno == 0 && capacityLong > 0 && + static_cast<uint64_t>(capacityLong) <= + static_cast<uint64_t>(INT32_MAX)) { + capacity = PowerOfTwo32(ActivePS::ClampToAllowedEntries( + static_cast<uint32_t>(capacityLong))); + LOG("- MOZ_PROFILER_STARTUP_ENTRIES = %u", unsigned(capacity.Value())); + } else { + LOG("- MOZ_PROFILER_STARTUP_ENTRIES not a valid integer: %s", + startupCapacity); + PrintUsageThenExit(1); + } + } + + const char* startupDuration = getenv("MOZ_PROFILER_STARTUP_DURATION"); + if (startupDuration && startupDuration[0] != '\0') { + errno = 0; + double durationVal = PR_strtod(startupDuration, nullptr); + if (errno == 0 && durationVal >= 0.0) { + if (durationVal > 0.0) { + duration = Some(durationVal); + } + LOG("- MOZ_PROFILER_STARTUP_DURATION = %f", durationVal); + } else { + LOG("- MOZ_PROFILER_STARTUP_DURATION not a valid float: %s", + startupDuration); + PrintUsageThenExit(1); + } + } + + const char* startupInterval = getenv("MOZ_PROFILER_STARTUP_INTERVAL"); + if (startupInterval && startupInterval[0] != '\0') { + errno = 0; + interval = PR_strtod(startupInterval, nullptr); + if (errno == 0 && interval > 0.0 && interval <= PROFILER_MAX_INTERVAL) { + LOG("- MOZ_PROFILER_STARTUP_INTERVAL = %f", interval); + } else { + LOG("- MOZ_PROFILER_STARTUP_INTERVAL not a valid float: %s", + startupInterval); + PrintUsageThenExit(1); + } + } + + features |= StartupExtraDefaultFeatures() & AvailableFeatures(); + + const char* startupFeaturesBitfield = + getenv("MOZ_PROFILER_STARTUP_FEATURES_BITFIELD"); + if (startupFeaturesBitfield && startupFeaturesBitfield[0] != '\0') { + errno = 0; + features = strtol(startupFeaturesBitfield, nullptr, 10); + if (errno == 0 && features != 0) { + LOG("- MOZ_PROFILER_STARTUP_FEATURES_BITFIELD = %d", features); + } else { + LOG("- MOZ_PROFILER_STARTUP_FEATURES_BITFIELD not a valid integer: %s", + startupFeaturesBitfield); + PrintUsageThenExit(1); + } + } else { + const char* startupFeatures = getenv("MOZ_PROFILER_STARTUP_FEATURES"); + if (startupFeatures && startupFeatures[0] != '\0') { + // Interpret startupFeatures as a list of feature strings, separated by + // commas. + UniquePtr<char[]> featureStringStorage; + Vector<const char*> featureStringArray = + SplitAtCommas(startupFeatures, featureStringStorage); + features = ParseFeaturesFromStringArray(featureStringArray.begin(), + featureStringArray.length(), + /* aIsStartup */ true); + LOG("- MOZ_PROFILER_STARTUP_FEATURES = %d", features); + } + } + + const char* startupFilters = getenv("MOZ_PROFILER_STARTUP_FILTERS"); + if (startupFilters && startupFilters[0] != '\0') { + filters = SplitAtCommas(startupFilters, filterStorage); + LOG("- MOZ_PROFILER_STARTUP_FILTERS = %s", startupFilters); + } + + locked_profiler_start(lock, capacity, interval, features, filters.begin(), + filters.length(), 0, duration); + } + +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + // Start counting memory allocations (outside of lock because this may call + // profiler_add_sampled_counter which would attempt to take the lock.) + mozilla::profiler::install_memory_hooks(); +#endif + + // We do this with gPSMutex unlocked. The comment in profiler_stop() explains + // why. 
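+  // (Briefly: observers may call back into profiler code that takes gPSMutex,
+  // which would deadlock if we still held the lock here.)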
+ NotifyProfilerStarted(capacity, duration, interval, features, filters.begin(), + filters.length(), 0); +} + +static void locked_profiler_save_profile_to_file( + PSLockRef aLock, const char* aFilename, + const PreRecordedMetaInformation& aPreRecordedMetaInformation, + bool aIsShuttingDown); + +static SamplerThread* locked_profiler_stop(PSLockRef aLock); + +void profiler_shutdown(IsFastShutdown aIsFastShutdown) { + LOG("profiler_shutdown"); + + VTUNE_SHUTDOWN(); + + MOZ_RELEASE_ASSERT(NS_IsMainThread()); + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + const auto preRecordedMetaInformation = PreRecordMetaInformation(); + + ProfilerParent::ProfilerWillStopIfStarted(); + + // If the profiler is active we must get a handle to the SamplerThread before + // ActivePS is destroyed, in order to delete it. + SamplerThread* samplerThread = nullptr; + { + PSAutoLock lock(gPSMutex); + + // Save the profile on shutdown if requested. + if (ActivePS::Exists(lock)) { + const char* filename = getenv("MOZ_PROFILER_SHUTDOWN"); + if (filename) { + locked_profiler_save_profile_to_file(lock, filename, + preRecordedMetaInformation, + /* aIsShuttingDown */ true); + } + if (aIsFastShutdown == IsFastShutdown::Yes) { + return; + } + + samplerThread = locked_profiler_stop(lock); + } else if (aIsFastShutdown == IsFastShutdown::Yes) { + return; + } + + CorePS::Destroy(lock); + + // We just destroyed CorePS and the ThreadInfos it contains, so we can + // clear this thread's TLSRegisteredThread. + TLSRegisteredThread::ResetRegisteredThread(lock); + // We can also clear the AutoProfilerLabel's ProfilingStack because the + // main thread should not use labels after profiler_shutdown. + TLSRegisteredThread::ResetAutoProfilerLabelProfilingStack(lock); + +#ifdef MOZ_TASK_TRACER + tasktracer::ShutdownTaskTracer(); +#endif + } + + // We do these operations with gPSMutex unlocked. The comments in + // profiler_stop() explain why. + if (samplerThread) { + ProfilerParent::ProfilerStopped(); + NotifyObservers("profiler-stopped"); + delete samplerThread; + } +} + +static bool WriteProfileToJSONWriter(SpliceableChunkedJSONWriter& aWriter, + double aSinceTime, bool aIsShuttingDown, + ProfilerCodeAddressService* aService) { + LOG("WriteProfileToJSONWriter"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + aWriter.Start(); + { + if (!profiler_stream_json_for_this_process(aWriter, aSinceTime, + aIsShuttingDown, aService)) { + return false; + } + + // Don't include profiles from other processes because this is a + // synchronous function. + aWriter.StartArrayProperty("processes"); + aWriter.EndArray(); + } + aWriter.End(); + return true; +} + +void profiler_set_process_name(const nsACString& aProcessName, + const nsACString* aETLDplus1) { + LOG("profiler_set_process_name(\"%s\", \"%s\")", aProcessName.Data(), + aETLDplus1 ? 
aETLDplus1->Data() : "<none>"); + PSAutoLock lock(gPSMutex); + CorePS::SetProcessName(lock, aProcessName); + if (aETLDplus1) { + CorePS::SetETLDplus1(lock, *aETLDplus1); + } +} + +UniquePtr<char[]> profiler_get_profile(double aSinceTime, + bool aIsShuttingDown) { + LOG("profiler_get_profile"); + + UniquePtr<ProfilerCodeAddressService> service = + profiler_code_address_service_for_presymbolication(); + + SpliceableChunkedJSONWriter b; + if (!WriteProfileToJSONWriter(b, aSinceTime, aIsShuttingDown, + service.get())) { + return nullptr; + } + return b.ChunkedWriteFunc().CopyData(); +} + +void profiler_get_profile_json_into_lazily_allocated_buffer( + const std::function<char*(size_t)>& aAllocator, double aSinceTime, + bool aIsShuttingDown) { + LOG("profiler_get_profile_json_into_lazily_allocated_buffer"); + + UniquePtr<ProfilerCodeAddressService> service = + profiler_code_address_service_for_presymbolication(); + + SpliceableChunkedJSONWriter b; + if (!WriteProfileToJSONWriter(b, aSinceTime, aIsShuttingDown, + service.get())) { + return; + } + + b.ChunkedWriteFunc().CopyDataIntoLazilyAllocatedBuffer(aAllocator); +} + +void profiler_get_start_params(int* aCapacity, Maybe<double>* aDuration, + double* aInterval, uint32_t* aFeatures, + Vector<const char*>* aFilters, + uint64_t* aActiveBrowsingContextID) { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + if (NS_WARN_IF(!aCapacity) || NS_WARN_IF(!aDuration) || + NS_WARN_IF(!aInterval) || NS_WARN_IF(!aFeatures) || + NS_WARN_IF(!aFilters)) { + return; + } + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + *aCapacity = 0; + *aDuration = Nothing(); + *aInterval = 0; + *aFeatures = 0; + *aActiveBrowsingContextID = 0; + aFilters->clear(); + return; + } + + *aCapacity = ActivePS::Capacity(lock).Value(); + *aDuration = ActivePS::Duration(lock); + *aInterval = ActivePS::Interval(lock); + *aFeatures = ActivePS::Features(lock); + *aActiveBrowsingContextID = ActivePS::ActiveBrowsingContextID(lock); + + const Vector<std::string>& filters = ActivePS::Filters(lock); + MOZ_ALWAYS_TRUE(aFilters->resize(filters.length())); + for (uint32_t i = 0; i < filters.length(); ++i) { + (*aFilters)[i] = filters[i].c_str(); + } +} + +ProfileBufferControlledChunkManager* profiler_get_controlled_chunk_manager() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + PSAutoLock lock(gPSMutex); + if (NS_WARN_IF(!ActivePS::Exists(lock))) { + return nullptr; + } + return &ActivePS::ControlledChunkManager(lock); +} + +namespace mozilla { + +void GetProfilerEnvVarsForChildProcess( + std::function<void(const char* key, const char* value)>&& aSetEnv) { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + aSetEnv("MOZ_PROFILER_STARTUP", ""); + return; + } + + aSetEnv("MOZ_PROFILER_STARTUP", "1"); + + // Hidden option to stop Base Profiler, mostly due to Talos intermittents, + // see https://bugzilla.mozilla.org/show_bug.cgi?id=1638851#c3 + // TODO: Investigate root cause and remove this in bugs 1648324 and 1648325. + if (getenv("MOZ_PROFILER_STARTUP_NO_BASE")) { + aSetEnv("MOZ_PROFILER_STARTUP_NO_BASE", "1"); + } + + auto capacityString = + Smprintf("%u", unsigned(ActivePS::Capacity(lock).Value())); + aSetEnv("MOZ_PROFILER_STARTUP_ENTRIES", capacityString.get()); + + // Use AppendFloat instead of Smprintf with %f because the decimal + // separator used by %f is locale-dependent. But the string we produce needs + // to be parseable by strtod, which only accepts the period character as a + // decimal separator. 
AppendFloat always uses the period character. + nsCString intervalString; + intervalString.AppendFloat(ActivePS::Interval(lock)); + aSetEnv("MOZ_PROFILER_STARTUP_INTERVAL", intervalString.get()); + + auto featuresString = Smprintf("%d", ActivePS::Features(lock)); + aSetEnv("MOZ_PROFILER_STARTUP_FEATURES_BITFIELD", featuresString.get()); + + std::string filtersString; + const Vector<std::string>& filters = ActivePS::Filters(lock); + for (uint32_t i = 0; i < filters.length(); ++i) { + if (i != 0) { + filtersString += ","; + } + filtersString += filters[i]; + } + aSetEnv("MOZ_PROFILER_STARTUP_FILTERS", filtersString.c_str()); +} + +} // namespace mozilla + +void profiler_received_exit_profile(const nsCString& aExitProfile) { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + PSAutoLock lock(gPSMutex); + if (!ActivePS::Exists(lock)) { + return; + } + ActivePS::AddExitProfile(lock, aExitProfile); +} + +Vector<nsCString> profiler_move_exit_profiles() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + PSAutoLock lock(gPSMutex); + Vector<nsCString> profiles; + if (ActivePS::Exists(lock)) { + profiles = ActivePS::MoveExitProfiles(lock); + } + return profiles; +} + +static void locked_profiler_save_profile_to_file( + PSLockRef aLock, const char* aFilename, + const PreRecordedMetaInformation& aPreRecordedMetaInformation, + bool aIsShuttingDown = false) { + LOG("locked_profiler_save_profile_to_file(%s)", aFilename); + + MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock)); + + std::ofstream stream; + stream.open(aFilename); + if (stream.is_open()) { + SpliceableJSONWriter w(MakeUnique<OStreamJSONWriteFunc>(stream)); + w.Start(); + { + locked_profiler_stream_json_for_this_process(aLock, w, /* sinceTime */ 0, + aPreRecordedMetaInformation, + aIsShuttingDown, nullptr); + + w.StartArrayProperty("processes"); + Vector<nsCString> exitProfiles = ActivePS::MoveExitProfiles(aLock); + for (auto& exitProfile : exitProfiles) { + if (!exitProfile.IsEmpty()) { + w.Splice(exitProfile); + } + } + w.EndArray(); + } + w.End(); + + stream.close(); + } +} + +void profiler_save_profile_to_file(const char* aFilename) { + LOG("profiler_save_profile_to_file(%s)", aFilename); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + const auto preRecordedMetaInformation = PreRecordMetaInformation(); + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return; + } + + locked_profiler_save_profile_to_file(lock, aFilename, + preRecordedMetaInformation); +} + +uint32_t profiler_get_available_features() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + return AvailableFeatures(); +} + +Maybe<ProfilerBufferInfo> profiler_get_buffer_info() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return Nothing(); + } + + return Some(ActivePS::Buffer(lock).GetProfilerBufferInfo()); +} + +static void PollJSSamplingForCurrentThread() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + RegisteredThread* registeredThread = + TLSRegisteredThread::RegisteredThread(lock); + if (!registeredThread) { + return; + } + + registeredThread->PollJSSampling(); +} + +// When the profiler is started on a background thread, we can't synchronously +// call PollJSSampling on the main thread's ThreadInfo. And the next regular +// call to PollJSSampling on the main thread would only happen once the main +// thread triggers a JS interrupt callback. 
+// This means that all the JS execution between profiler_start() and the first +// JS interrupt would happen with JS sampling disabled, and we wouldn't get any +// JS function information for that period of time. +// So in order to start JS sampling as soon as possible, we dispatch a runnable +// to the main thread which manually calls PollJSSamplingForCurrentThread(). +// In some cases this runnable will lose the race with the next JS interrupt. +// That's fine; PollJSSamplingForCurrentThread() is immune to redundant calls. +static void TriggerPollJSSamplingOnMainThread() { + nsCOMPtr<nsIThread> mainThread; + nsresult rv = NS_GetMainThread(getter_AddRefs(mainThread)); + if (NS_SUCCEEDED(rv) && mainThread) { + nsCOMPtr<nsIRunnable> task = + NS_NewRunnableFunction("TriggerPollJSSamplingOnMainThread", + []() { PollJSSamplingForCurrentThread(); }); + SchedulerGroup::Dispatch(TaskCategory::Other, task.forget()); + } +} + +static bool HasMinimumLength(const char* aString, size_t aMinimumLength) { + if (!aString) { + return false; + } + for (size_t i = 0; i < aMinimumLength; ++i) { + if (aString[i] == '\0') { + return false; + } + } + return true; +} + +static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity, + double aInterval, uint32_t aFeatures, + const char** aFilters, uint32_t aFilterCount, + uint64_t aActiveBrowsingContextID, + const Maybe<double>& aDuration) { + if (LOG_TEST) { + LOG("locked_profiler_start"); + LOG("- capacity = %u", unsigned(aCapacity.Value())); + LOG("- duration = %.2f", aDuration ? *aDuration : -1); + LOG("- interval = %.2f", aInterval); + LOG("- browsing context ID = %" PRIu64, aActiveBrowsingContextID); + +#define LOG_FEATURE(n_, str_, Name_, desc_) \ + if (ProfilerFeature::Has##Name_(aFeatures)) { \ + LOG("- feature = %s", str_); \ + } + + PROFILER_FOR_EACH_FEATURE(LOG_FEATURE) + +#undef LOG_FEATURE + + for (uint32_t i = 0; i < aFilterCount; i++) { + LOG("- threads = %s", aFilters[i]); + } + } + + MOZ_RELEASE_ASSERT(CorePS::Exists() && !ActivePS::Exists(aLock)); + + UniquePtr<char[]> baseprofile; + if (baseprofiler::profiler_is_active()) { + // Note that we still hold the lock, so the sampler cannot run yet and + // interact negatively with the still-active BaseProfiler sampler. + // Assume that Base Profiler is active because of MOZ_PROFILER_STARTUP. + // Capture the Base Profiler startup profile threads (if any). + baseprofile = baseprofiler::profiler_get_profile( + /* aSinceTime */ 0, /* aIsShuttingDown */ false, + /* aOnlyThreads */ true); + + // Now stop Base Profiler (BP), as further recording will be ignored anyway, + // and so that it won't clash with Gecko Profiler (GP) sampling starting + // after the lock is dropped. + // On Linux this is especially important to do before creating the GP + // sampler, because the BP sampler may send a signal (to stop threads to be + // sampled), which the GP would intercept before its own initialization is + // complete and ready to handle such signals. + // Note that even though `profiler_stop()` doesn't immediately destroy and + // join the sampler thread, it safely deactivates it in such a way that the + // thread will soon exit without doing any actual work. + // TODO: Allow non-sampling profiling to continue. + // TODO: Re-start BP after GP shutdown, to capture post-XPCOM shutdown. + baseprofiler::profiler_stop(); + } + +#if defined(GP_PLAT_amd64_windows) + InitializeWin64ProfilerHooks(); +#endif + + // Fall back to the default values if the passed-in values are unreasonable. 
+ // We want to be able to store at least one full stack. + PowerOfTwo32 capacity = + (aCapacity.Value() >= + ProfileBufferChunkManager::scExpectedMaximumStackSize / scBytesPerEntry) + ? aCapacity + : PROFILER_DEFAULT_ENTRIES; + Maybe<double> duration = aDuration; + + if (aDuration && *aDuration <= 0) { + duration = Nothing(); + } + + double interval = aInterval > 0 ? aInterval : PROFILER_DEFAULT_INTERVAL; + + ActivePS::Create(aLock, capacity, interval, aFeatures, aFilters, aFilterCount, + aActiveBrowsingContextID, duration); + + // ActivePS::Create can only succeed or crash. + MOZ_ASSERT(ActivePS::Exists(aLock)); + + // An "empty" profile string may in fact contain 1 character (a newline), so + // we want at least 2 characters to register a profile. + if (HasMinimumLength(baseprofile.get(), 2)) { + // The BaseProfiler startup profile will be stored as a separate "process" + // in the Gecko Profiler profile, and shown as a new track under the + // corresponding Gecko Profiler thread. + ActivePS::AddBaseProfileThreads(aLock, std::move(baseprofile)); + } + + // Set up profiling for each registered thread, if appropriate. +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + bool isMainThreadBeingProfiled = false; +#endif + int tid = profiler_current_thread_id(); + const Vector<UniquePtr<RegisteredThread>>& registeredThreads = + CorePS::RegisteredThreads(aLock); + for (auto& registeredThread : registeredThreads) { + RefPtr<ThreadInfo> info = registeredThread->Info(); + + if (ActivePS::ShouldProfileThread(aLock, info)) { + registeredThread->RacyRegisteredThread().SetIsBeingProfiled(true); + nsCOMPtr<nsIEventTarget> eventTarget = registeredThread->GetEventTarget(); + ProfiledThreadData* profiledThreadData = ActivePS::AddLiveProfiledThread( + aLock, registeredThread.get(), + MakeUnique<ProfiledThreadData>(info, eventTarget)); + if (ActivePS::FeatureJS(aLock)) { + registeredThread->StartJSSampling(ActivePS::JSFlags(aLock)); + if (info->ThreadId() == tid) { + // We can manually poll the current thread so it starts sampling + // immediately. + registeredThread->PollJSSampling(); + } else if (info->IsMainThread()) { + // Dispatch a runnable to the main thread to call PollJSSampling(), + // so that we don't have wait for the next JS interrupt callback in + // order to start profiling JS. + TriggerPollJSSamplingOnMainThread(); + } + } +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + if (info->IsMainThread()) { + isMainThreadBeingProfiled = true; + } +#endif + registeredThread->RacyRegisteredThread().ReinitializeOnResume(); + if (registeredThread->GetJSContext()) { + profiledThreadData->NotifyReceivedJSContext(0); + } + } + } + + // Setup support for pushing/popping labels in mozglue. + RegisterProfilerLabelEnterExit(MozGlueLabelEnter, MozGlueLabelExit); + +#ifdef MOZ_TASK_TRACER + if (ActivePS::FeatureTaskTracer(aLock)) { + tasktracer::StartLogging(); + } +#endif + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(aLock)) { + int javaInterval = interval; + // Java sampling doesn't accurately keep up with the sampling rate that is + // lower than 1ms. + if (javaInterval < 1) { + javaInterval = 1; + } + // Send the interval-relative entry count, but we have 100000 hard cap in + // the java code, it can't be more than that. 
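+    // E.g., a 0.5ms Gecko interval clamped to the 1ms Java minimum passes half
+    // as many entries, so the Java buffer covers roughly the same duration.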
+ java::GeckoJavaSampler::Start( + javaInterval, std::round((double)(capacity.Value()) * interval / + (double)(javaInterval))); + } +#endif + +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + if (ActivePS::FeatureNativeAllocations(aLock)) { + if (isMainThreadBeingProfiled) { + mozilla::profiler::enable_native_allocations(); + } else { + NS_WARNING( + "The nativeallocations feature is turned on, but the main thread is " + "not being profiled. The allocations are only stored on the main " + "thread."); + } + } +#endif + + if (ProfilerFeature::HasAudioCallbackTracing(aFeatures)) { + StartAudioCallbackTracing(); + } + + // At the very end, set up RacyFeatures. + RacyFeatures::SetActive(ActivePS::Features(aLock)); +} + +void profiler_start(PowerOfTwo32 aCapacity, double aInterval, + uint32_t aFeatures, const char** aFilters, + uint32_t aFilterCount, uint64_t aActiveBrowsingContextID, + const Maybe<double>& aDuration) { + LOG("profiler_start"); + + ProfilerParent::ProfilerWillStopIfStarted(); + + SamplerThread* samplerThread = nullptr; + { + PSAutoLock lock(gPSMutex); + + // Initialize if necessary. + if (!CorePS::Exists()) { + profiler_init(nullptr); + } + + // Reset the current state if the profiler is running. + if (ActivePS::Exists(lock)) { + samplerThread = locked_profiler_stop(lock); + } + + locked_profiler_start(lock, aCapacity, aInterval, aFeatures, aFilters, + aFilterCount, aActiveBrowsingContextID, aDuration); + } + +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + // Start counting memory allocations (outside of lock because this may call + // profiler_add_sampled_counter which would attempt to take the lock.) + mozilla::profiler::install_memory_hooks(); +#endif + + // We do these operations with gPSMutex unlocked. The comments in + // profiler_stop() explain why. + if (samplerThread) { + ProfilerParent::ProfilerStopped(); + NotifyObservers("profiler-stopped"); + delete samplerThread; + } + NotifyProfilerStarted(aCapacity, aDuration, aInterval, aFeatures, aFilters, + aFilterCount, aActiveBrowsingContextID); +} + +void profiler_ensure_started(PowerOfTwo32 aCapacity, double aInterval, + uint32_t aFeatures, const char** aFilters, + uint32_t aFilterCount, + uint64_t aActiveBrowsingContextID, + const Maybe<double>& aDuration) { + LOG("profiler_ensure_started"); + + ProfilerParent::ProfilerWillStopIfStarted(); + + bool startedProfiler = false; + SamplerThread* samplerThread = nullptr; + { + PSAutoLock lock(gPSMutex); + + // Initialize if necessary. + if (!CorePS::Exists()) { + profiler_init(nullptr); + } + + if (ActivePS::Exists(lock)) { + // The profiler is active. + if (!ActivePS::Equals(lock, aCapacity, aDuration, aInterval, aFeatures, + aFilters, aFilterCount, aActiveBrowsingContextID)) { + // Stop and restart with different settings. + samplerThread = locked_profiler_stop(lock); + locked_profiler_start(lock, aCapacity, aInterval, aFeatures, aFilters, + aFilterCount, aActiveBrowsingContextID, + aDuration); + startedProfiler = true; + } + } else { + // The profiler is stopped. + locked_profiler_start(lock, aCapacity, aInterval, aFeatures, aFilters, + aFilterCount, aActiveBrowsingContextID, aDuration); + startedProfiler = true; + } + } + + // We do these operations with gPSMutex unlocked. The comments in + // profiler_stop() explain why. 
+ if (samplerThread) { + ProfilerParent::ProfilerStopped(); + NotifyObservers("profiler-stopped"); + delete samplerThread; + } + + if (startedProfiler) { + NotifyProfilerStarted(aCapacity, aDuration, aInterval, aFeatures, aFilters, + aFilterCount, aActiveBrowsingContextID); + } +} + +[[nodiscard]] static SamplerThread* locked_profiler_stop(PSLockRef aLock) { + LOG("locked_profiler_stop"); + + MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock)); + + // At the very start, clear RacyFeatures. + RacyFeatures::SetInactive(); + + if (ActivePS::FeatureAudioCallbackTracing(aLock)) { + StopAudioCallbackTracing(); + } + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(aLock)) { + java::GeckoJavaSampler::Stop(); + } +#endif + +#ifdef MOZ_TASK_TRACER + if (ActivePS::FeatureTaskTracer(aLock)) { + tasktracer::StopLogging(); + } +#endif + + // Remove support for pushing/popping labels in mozglue. + RegisterProfilerLabelEnterExit(nullptr, nullptr); + + // Stop sampling live threads. + int tid = profiler_current_thread_id(); + const Vector<LiveProfiledThreadData>& liveProfiledThreads = + ActivePS::LiveProfiledThreads(aLock); + for (auto& thread : liveProfiledThreads) { + RegisteredThread* registeredThread = thread.mRegisteredThread; + registeredThread->RacyRegisteredThread().SetIsBeingProfiled(false); + if (ActivePS::FeatureJS(aLock)) { + registeredThread->StopJSSampling(); + RefPtr<ThreadInfo> info = registeredThread->Info(); + if (info->ThreadId() == tid) { + // We can manually poll the current thread so it stops profiling + // immediately. + registeredThread->PollJSSampling(); + } else if (info->IsMainThread()) { + // Dispatch a runnable to the main thread to call PollJSSampling(), + // so that we don't have wait for the next JS interrupt callback in + // order to start profiling JS. + TriggerPollJSSamplingOnMainThread(); + } + } + } + +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + if (ActivePS::FeatureNativeAllocations(aLock)) { + mozilla::profiler::disable_native_allocations(); + } +#endif + + // The Stop() call doesn't actually stop Run(); that happens in this + // function's caller when the sampler thread is destroyed. Stop() just gives + // the SamplerThread a chance to do some cleanup with gPSMutex locked. + SamplerThread* samplerThread = ActivePS::Destroy(aLock); + samplerThread->Stop(aLock); + + return samplerThread; +} + +void profiler_stop() { + LOG("profiler_stop"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + ProfilerParent::ProfilerWillStopIfStarted(); + +#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY) + // Remove the hooks early, as native allocations (if they are on) can be + // quite expensive. + mozilla::profiler::remove_memory_hooks(); +#endif + + SamplerThread* samplerThread; + { + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return; + } + + samplerThread = locked_profiler_stop(lock); + } + + // We notify observers with gPSMutex unlocked. Otherwise we might get a + // deadlock, if code run by these functions calls a profiler function that + // locks gPSMutex, for example when it wants to insert a marker. + // (This has been seen in practise in bug 1346356, when we were still firing + // these notifications synchronously.) + ProfilerParent::ProfilerStopped(); + NotifyObservers("profiler-stopped"); + + // We delete with gPSMutex unlocked. 
Otherwise we would get a deadlock: we + // would be waiting here with gPSMutex locked for SamplerThread::Run() to + // return so the join operation within the destructor can complete, but Run() + // needs to lock gPSMutex to return. + // + // Because this call occurs with gPSMutex unlocked, it -- including the final + // iteration of Run()'s loop -- must be able detect deactivation and return + // in a way that's safe with respect to other gPSMutex-locking operations + // that may have occurred in the meantime. + delete samplerThread; +} + +bool profiler_is_paused() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return false; + } + + return ActivePS::IsPaused(lock); +} + +/* [[nodiscard]] */ bool profiler_callback_after_sampling( + PostSamplingCallback&& aCallback) { + LOG("profiler_callback_after_sampling"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + return ActivePS::AppendPostSamplingCallback(lock, std::move(aCallback)); +} + +void profiler_pause() { + LOG("profiler_pause"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + { + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return; + } + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(lock) && !ActivePS::IsSamplingPaused(lock)) { + // Not paused yet, so this is the first pause, let Java know. + // TODO: Distinguish Pause and PauseSampling in Java. + java::GeckoJavaSampler::PauseSampling(); + } +#endif + + RacyFeatures::SetPaused(); + ActivePS::SetIsPaused(lock, true); + ActivePS::Buffer(lock).AddEntry(ProfileBufferEntry::Pause(profiler_time())); + } + + // gPSMutex must be unlocked when we notify, to avoid potential deadlocks. + ProfilerParent::ProfilerPaused(); + NotifyObservers("profiler-paused"); +} + +void profiler_resume() { + LOG("profiler_resume"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + { + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return; + } + + ActivePS::Buffer(lock).AddEntry( + ProfileBufferEntry::Resume(profiler_time())); + ActivePS::SetIsPaused(lock, false); + RacyFeatures::SetUnpaused(); + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(lock) && !ActivePS::IsSamplingPaused(lock)) { + // Not paused anymore, so this is the last unpause, let Java know. + // TODO: Distinguish Unpause and UnpauseSampling in Java. + java::GeckoJavaSampler::UnpauseSampling(); + } +#endif + } + + // gPSMutex must be unlocked when we notify, to avoid potential deadlocks. + ProfilerParent::ProfilerResumed(); + NotifyObservers("profiler-resumed"); +} + +bool profiler_is_sampling_paused() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return false; + } + + return ActivePS::IsSamplingPaused(lock); +} + +void profiler_pause_sampling() { + LOG("profiler_pause_sampling"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + { + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return; + } + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(lock) && !ActivePS::IsSamplingPaused(lock)) { + // Not paused yet, so this is the first pause, let Java know. + // TODO: Distinguish Pause and PauseSampling in Java. + java::GeckoJavaSampler::PauseSampling(); + } +#endif + + RacyFeatures::SetSamplingPaused(); + ActivePS::SetIsSamplingPaused(lock, true); + ActivePS::Buffer(lock).AddEntry( + ProfileBufferEntry::PauseSampling(profiler_time())); + } + + // gPSMutex must be unlocked when we notify, to avoid potential deadlocks. 
+ ProfilerParent::ProfilerPausedSampling(); + NotifyObservers("profiler-paused-sampling"); +} + +void profiler_resume_sampling() { + LOG("profiler_resume_sampling"); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + { + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return; + } + + ActivePS::Buffer(lock).AddEntry( + ProfileBufferEntry::ResumeSampling(profiler_time())); + ActivePS::SetIsSamplingPaused(lock, false); + RacyFeatures::SetSamplingUnpaused(); + +#if defined(GP_OS_android) + if (ActivePS::FeatureJava(lock) && !ActivePS::IsSamplingPaused(lock)) { + // Not paused anymore, so this is the last unpause, let Java know. + // TODO: Distinguish Unpause and UnpauseSampling in Java. + java::GeckoJavaSampler::UnpauseSampling(); + } +#endif + } + + // gPSMutex must be unlocked when we notify, to avoid potential deadlocks. + ProfilerParent::ProfilerResumedSampling(); + NotifyObservers("profiler-resumed-sampling"); +} + +bool profiler_feature_active(uint32_t aFeature) { + // This function runs both on and off the main thread. + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + // This function is hot enough that we use RacyFeatures, not ActivePS. + return RacyFeatures::IsActiveWithFeature(aFeature); +} + +void profiler_write_active_configuration(JSONWriter& aWriter) { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + PSAutoLock lock(gPSMutex); + ActivePS::WriteActiveConfiguration(lock, aWriter); +} + +void profiler_add_sampled_counter(BaseProfilerCount* aCounter) { + DEBUG_LOG("profiler_add_sampled_counter(%s)", aCounter->mLabel); + PSAutoLock lock(gPSMutex); + CorePS::AppendCounter(lock, aCounter); +} + +void profiler_remove_sampled_counter(BaseProfilerCount* aCounter) { + DEBUG_LOG("profiler_remove_sampled_counter(%s)", aCounter->mLabel); + PSAutoLock lock(gPSMutex); + // Note: we don't enforce a final sample, though we could do so if the + // profiler was active + CorePS::RemoveCounter(lock, aCounter); +} + +ProfilingStack* profiler_register_thread(const char* aName, + void* aGuessStackTop) { + DEBUG_LOG("profiler_register_thread(%s)", aName); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + // Make sure we have a nsThread wrapper for the current thread, and that NSPR + // knows its name. + (void)NS_GetCurrentThread(); + NS_SetCurrentThreadName(aName); + + if (!TLSRegisteredThread::IsTLSInited()) { + return nullptr; + } + + PSAutoLock lock(gPSMutex); + + if (RegisteredThread* thread = TLSRegisteredThread::RegisteredThread(lock)) { + MOZ_RELEASE_ASSERT(IsRegisteredThreadInRegisteredThreadsList(lock, thread), + "Thread being re-registered is not in registered thread " + "list even though its TLS is non-null"); + MOZ_RELEASE_ASSERT( + thread->Info()->ThreadId() == profiler_current_thread_id(), + "Thread being re-registered has changed its TID"); + LOG("profiler_register_thread(%s) - thread %d already registered as %s", + aName, profiler_current_thread_id(), thread->Info()->Name()); + // TODO: Use new name. This is currently not possible because the + // RegisteredThread's ThreadInfo cannot be changed. + // In the meantime, we record a marker that could be used in the frontend. 
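+    // The marker below targets the main thread via MarkerThreadId::MainThread().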
+ nsCString text("Thread "); + text.AppendInt(profiler_current_thread_id()); + text.AppendLiteral(" \""); + text.AppendASCII(thread->Info()->Name()); + text.AppendLiteral("\" attempted to re-register as \""); + text.AppendASCII(aName); + text.AppendLiteral("\""); + PROFILER_MARKER_TEXT("profiler_register_thread again", OTHER_Profiling, + MarkerThreadId::MainThread(), text); + + return &thread->RacyRegisteredThread().ProfilingStack(); + } + + void* stackTop = GetStackTop(aGuessStackTop); + return locked_register_thread(lock, aName, stackTop); +} + +void profiler_unregister_thread() { + PSAutoLock lock(gPSMutex); + + if (!TLSRegisteredThread::IsTLSInited()) { + return; + } + + if (!CorePS::Exists()) { + // This function can be called after the main thread has already shut down. + // We want to reset the AutoProfilerLabel's ProfilingStack pointer (if + // needed), because a thread could stay registered after the profiler has + // shut down. + TLSRegisteredThread::ResetAutoProfilerLabelProfilingStack(lock); + return; + } + + // We don't call RegisteredThread::StopJSSampling() here; there's no point + // doing that for a JS thread that is in the process of disappearing. + + if (RegisteredThread* registeredThread = + TLSRegisteredThread::RegisteredThread(lock)) { + MOZ_RELEASE_ASSERT( + IsRegisteredThreadInRegisteredThreadsList(lock, registeredThread), + "Thread being unregistered is not in registered thread list even " + "though its TLS is non-null"); + MOZ_RELEASE_ASSERT( + registeredThread->Info()->ThreadId() == profiler_current_thread_id(), + "Thread being unregistered has changed its TID"); + RefPtr<ThreadInfo> info = registeredThread->Info(); + + DEBUG_LOG("profiler_unregister_thread: %s", info->Name()); + + if (ActivePS::Exists(lock)) { + ActivePS::UnregisterThread(lock, registeredThread); + } + + // Clear the pointer to the RegisteredThread object that we're about to + // destroy, as well as the AutoProfilerLabel's ProfilingStack because the + // thread is unregistering itself and won't need the ProfilingStack anymore. + TLSRegisteredThread::ResetRegisteredThread(lock); + TLSRegisteredThread::ResetAutoProfilerLabelProfilingStack(lock); + + // Remove the thread from the list of registered threads. This deletes the + // registeredThread object. + CorePS::RemoveRegisteredThread(lock, registeredThread); + + MOZ_RELEASE_ASSERT( + !IsRegisteredThreadInRegisteredThreadsList(lock, registeredThread), + "After unregistering, thread should no longer be in the registered " + "thread list"); + MOZ_RELEASE_ASSERT( + !TLSRegisteredThread::RegisteredThread(lock), + "TLS should have been reset after un-registering thread"); + } else { + // There are two ways TLSRegisteredThread::RegisteredThread() might be + // empty. + // + // - TLSRegisteredThread::Init() failed in locked_register_thread(). + // + // - We've already called profiler_unregister_thread() for this thread. + // (Whether or not it should, this does happen in practice.) + LOG("profiler_unregister_thread() - thread %d already unregistered", + profiler_current_thread_id()); + // We cannot record a marker on this thread because it was already + // unregistered. Send it to the main thread (unless this *is* already the + // main thread, which has been unregistered); this may be useful to catch + // mismatched register/unregister pairs in Firefox. 
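+    // (C++17 if-with-initializer: `tid` is only in scope for this check.)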
+ if (int tid = profiler_current_thread_id(); + tid != profiler_main_thread_id()) { + nsCString threadIdString; + threadIdString.AppendInt(tid); + PROFILER_MARKER_TEXT("profiler_unregister_thread again", OTHER_Profiling, + MarkerThreadId::MainThread(), threadIdString); + } + } +} + +void profiler_register_page(uint64_t aBrowsingContextID, + uint64_t aInnerWindowID, const nsCString& aUrl, + uint64_t aEmbedderInnerWindowID) { + DEBUG_LOG("profiler_register_page(%" PRIu64 ", %" PRIu64 ", %s, %" PRIu64 ")", + aBrowsingContextID, aInnerWindowID, aUrl.get(), + aEmbedderInnerWindowID); + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + // When a Browsing context is first loaded, the first url loaded in it will be + // about:blank. Because of that, this call keeps the first non-about:blank + // registration of window and discards the previous one. + RefPtr<PageInformation> pageInfo = new PageInformation( + aBrowsingContextID, aInnerWindowID, aUrl, aEmbedderInnerWindowID); + CorePS::AppendRegisteredPage(lock, std::move(pageInfo)); + + // After appending the given page to CorePS, look for the expired + // pages and remove them if there are any. + if (ActivePS::Exists(lock)) { + ActivePS::DiscardExpiredPages(lock); + } +} + +void profiler_unregister_page(uint64_t aRegisteredInnerWindowID) { + PSAutoLock lock(gPSMutex); + + if (!CorePS::Exists()) { + // This function can be called after the main thread has already shut down. + return; + } + + // During unregistration, if the profiler is active, we have to keep the + // page information since there may be some markers associated with the given + // page. But if profiler is not active. we have no reason to keep the + // page information here because there can't be any marker associated with it. + if (ActivePS::Exists(lock)) { + ActivePS::UnregisterPage(lock, aRegisteredInnerWindowID); + } else { + CorePS::RemoveRegisteredPage(lock, aRegisteredInnerWindowID); + } +} + +void profiler_clear_all_pages() { + { + PSAutoLock lock(gPSMutex); + + if (!CorePS::Exists()) { + // This function can be called after the main thread has already shut + // down. + return; + } + + CorePS::ClearRegisteredPages(lock); + if (ActivePS::Exists(lock)) { + ActivePS::ClearUnregisteredPages(lock); + } + } + + // gPSMutex must be unlocked when we notify, to avoid potential deadlocks. + ProfilerParent::ClearAllPages(); +} + +Maybe<uint64_t> profiler_get_inner_window_id_from_docshell( + nsIDocShell* aDocshell) { + Maybe<uint64_t> innerWindowID = Nothing(); + if (aDocshell) { + auto outerWindow = aDocshell->GetWindow(); + if (outerWindow) { + auto innerWindow = outerWindow->GetCurrentInnerWindow(); + if (innerWindow) { + innerWindowID = Some(innerWindow->WindowID()); + } + } + } + return innerWindowID; +} + +void profiler_thread_sleep() { + // This function runs both on and off the main thread. + + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + RacyRegisteredThread* racyRegisteredThread = + TLSRegisteredThread::RacyRegisteredThread(); + if (!racyRegisteredThread) { + return; + } + + racyRegisteredThread->SetSleeping(); +} + +void profiler_thread_wake() { + // This function runs both on and off the main thread. 
+ + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + RacyRegisteredThread* racyRegisteredThread = + TLSRegisteredThread::RacyRegisteredThread(); + if (!racyRegisteredThread) { + return; + } + + racyRegisteredThread->SetAwake(); +} + +bool mozilla::profiler::detail::IsThreadBeingProfiled() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + const RacyRegisteredThread* racyRegisteredThread = + TLSRegisteredThread::RacyRegisteredThread(); + return racyRegisteredThread && racyRegisteredThread->IsBeingProfiled(); +} + +bool mozilla::profiler::detail::IsThreadRegistered() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + const RacyRegisteredThread* racyRegisteredThread = + TLSRegisteredThread::RacyRegisteredThread(); + // The simple presence of this TLS pointer is proof that the thread is + // registered. + return !!racyRegisteredThread; +} + +bool profiler_thread_is_sleeping() { + MOZ_RELEASE_ASSERT(NS_IsMainThread()); + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + RacyRegisteredThread* racyRegisteredThread = + TLSRegisteredThread::RacyRegisteredThread(); + if (!racyRegisteredThread) { + return false; + } + return racyRegisteredThread->IsSleeping(); +} + +void profiler_js_interrupt_callback() { + // This function runs on JS threads being sampled. + PollJSSamplingForCurrentThread(); +} + +double profiler_time() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + TimeDuration delta = TimeStamp::NowUnfuzzed() - CorePS::ProcessStartTime(); + return delta.ToMilliseconds(); +} + +bool profiler_capture_backtrace_into(ProfileChunkedBuffer& aChunkedBuffer) { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + if (!ActivePS::Exists(lock)) { + return false; + } + + RegisteredThread* registeredThread = + TLSRegisteredThread::RegisteredThread(lock); + if (!registeredThread) { + // If this was called from a non-registered thread, return false and do no + // more work. This can happen from a memory hook. Before the allocation + // tracking there was a MOZ_ASSERT() here checking for the existence of a + // registeredThread. + return false; + } + + ProfileBuffer profileBuffer(aChunkedBuffer); + + Registers regs; +#if defined(HAVE_NATIVE_UNWIND) + regs.SyncPopulate(); +#else + regs.Clear(); +#endif + + DoSyncSample(lock, *registeredThread, TimeStamp::NowUnfuzzed(), regs, + profileBuffer); + + return true; +} + +UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + // Quick is-active check before allocating a buffer. + if (!profiler_is_active()) { + return nullptr; + } + + auto buffer = MakeUnique<ProfileChunkedBuffer>( + ProfileChunkedBuffer::ThreadSafety::WithoutMutex, + MakeUnique<ProfileBufferChunkManagerSingle>( + ProfileBufferChunkManager::scExpectedMaximumStackSize)); + + if (!profiler_capture_backtrace_into(*buffer)) { + return nullptr; + } + + return buffer; +} + +UniqueProfilerBacktrace profiler_get_backtrace() { + UniquePtr<ProfileChunkedBuffer> buffer = profiler_capture_backtrace(); + + if (!buffer) { + return nullptr; + } + + return UniqueProfilerBacktrace( + new ProfilerBacktrace("SyncProfile", std::move(buffer))); +} + +void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) { + delete aBacktrace; +} + +// This is a simplified version of profiler_add_marker that can be easily passed +// into the JS engine. 
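+// Both arguments must be null-terminated strings; they are wrapped with
+// ProfilerString8View::WrapNullTerminatedString below.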
+void profiler_add_js_marker(const char* aMarkerName, const char* aMarkerText) { + PROFILER_MARKER_TEXT( + ProfilerString8View::WrapNullTerminatedString(aMarkerName), JS, {}, + ProfilerString8View::WrapNullTerminatedString(aMarkerText)); +} + +void profiler_add_js_allocation_marker(JS::RecordAllocationInfo&& info) { + if (!profiler_can_accept_markers()) { + return; + } + + struct JsAllocationMarker { + static constexpr mozilla::Span<const char> MarkerTypeName() { + return mozilla::MakeStringSpan("JS allocation"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter, + const mozilla::ProfilerString16View& aTypeName, + const mozilla::ProfilerString8View& aClassName, + const mozilla::ProfilerString16View& aDescriptiveTypeName, + const mozilla::ProfilerString8View& aCoarseType, uint64_t aSize, + bool aInNursery) { + if (aClassName.Length() != 0) { + aWriter.StringProperty("className", aClassName); + } + if (aTypeName.Length() != 0) { + aWriter.StringProperty( + "typeName", + NS_ConvertUTF16toUTF8(aTypeName.Data(), aTypeName.Length())); + } + if (aDescriptiveTypeName.Length() != 0) { + aWriter.StringProperty( + "descriptiveTypeName", + NS_ConvertUTF16toUTF8(aDescriptiveTypeName.Data(), + aDescriptiveTypeName.Length())); + } + aWriter.StringProperty("coarseType", aCoarseType); + aWriter.IntProperty("size", aSize); + aWriter.BoolProperty("inNursery", aInNursery); + } + static mozilla::MarkerSchema MarkerTypeDisplay() { + return mozilla::MarkerSchema::SpecialFrontendLocation{}; + } + }; + + profiler_add_marker( + "JS allocation", geckoprofiler::category::JS, MarkerStack::Capture(), + JsAllocationMarker{}, + ProfilerString16View::WrapNullTerminatedString(info.typeName), + ProfilerString8View::WrapNullTerminatedString(info.className), + ProfilerString16View::WrapNullTerminatedString(info.descriptiveTypeName), + ProfilerString8View::WrapNullTerminatedString(info.coarseType), info.size, + info.inNursery); +} + +bool profiler_is_locked_on_current_thread() { + // This function is used to help users avoid calling `profiler_...` functions + // when the profiler may already have a lock in place, which would prevent a + // 2nd recursive lock (resulting in a crash or a never-ending wait), or a + // deadlock between any two mutexes. So we must return `true` for any of: + // - The main profiler mutex, used by most functions, and/or + // - The buffer mutex, used directly in some functions without locking the + // main mutex, e.g., marker-related functions. + // - The ProfilerParent or ProfilerChild mutex, used to store and process + // buffer chunk updates. 
+ return gPSMutex.IsLockedOnCurrentThread() || + CorePS::CoreBuffer().IsThreadSafeAndLockedOnCurrentThread() || + ProfilerParent::IsLockedOnCurrentThread() || + ProfilerChild::IsLockedOnCurrentThread(); +} + +static constexpr net::TimingStruct scEmptyNetTimingStruct; + +void profiler_add_network_marker( + nsIURI* aURI, const nsACString& aRequestMethod, int32_t aPriority, + uint64_t aChannelId, NetworkLoadType aType, mozilla::TimeStamp aStart, + mozilla::TimeStamp aEnd, int64_t aCount, + mozilla::net::CacheDisposition aCacheDisposition, uint64_t aInnerWindowID, + const mozilla::net::TimingStruct* aTimings, nsIURI* aRedirectURI, + UniquePtr<ProfileChunkedBuffer> aSource, + const Maybe<nsDependentCString>& aContentType) { + if (!profiler_can_accept_markers()) { + return; + } + + nsAutoCStringN<2048> name; + name.AppendASCII("Load "); + // top 32 bits are process id of the load + name.AppendInt(aChannelId & 0xFFFFFFFFu); + + // These can do allocations/frees/etc; avoid if not active + nsAutoCStringN<2048> spec; + if (aURI) { + aURI->GetAsciiSpec(spec); + name.AppendASCII(": "); + name.Append(spec); + } + + nsAutoCString redirect_spec; + if (aRedirectURI) { + aRedirectURI->GetAsciiSpec(redirect_spec); + } + + struct NetworkMarker { + static constexpr Span<const char> MarkerTypeName() { + return MakeStringSpan("Network"); + } + static void StreamJSONMarkerData( + baseprofiler::SpliceableJSONWriter& aWriter, mozilla::TimeStamp aStart, + mozilla::TimeStamp aEnd, int64_t aID, const ProfilerString8View& aURI, + const ProfilerString8View& aRequestMethod, NetworkLoadType aType, + int32_t aPri, int64_t aCount, net::CacheDisposition aCacheDisposition, + const net::TimingStruct& aTimings, + const ProfilerString8View& aRedirectURI, + const ProfilerString8View& aContentType) { + // This payload still streams a startTime and endTime property because it + // made the migration to MarkerTiming on the front-end easier. 
+ aWriter.TimeProperty("startTime", aStart); + aWriter.TimeProperty("endTime", aEnd); + + aWriter.IntProperty("id", aID); + aWriter.StringProperty("status", GetNetworkState(aType)); + if (Span<const char> cacheString = GetCacheState(aCacheDisposition); + !cacheString.IsEmpty()) { + aWriter.StringProperty("cache", cacheString); + } + aWriter.IntProperty("pri", aPri); + if (aCount > 0) { + aWriter.IntProperty("count", aCount); + } + if (aURI.Length() != 0) { + aWriter.StringProperty("URI", aURI); + } + if (aRedirectURI.Length() != 0) { + aWriter.StringProperty("RedirectURI", aRedirectURI); + } + aWriter.StringProperty("requestMethod", aRequestMethod); + + if (aContentType.Length() != 0) { + aWriter.StringProperty("contentType", aContentType); + } else { + aWriter.NullProperty("contentType"); + } + + if (aType != NetworkLoadType::LOAD_START) { + aWriter.TimeProperty("domainLookupStart", aTimings.domainLookupStart); + aWriter.TimeProperty("domainLookupEnd", aTimings.domainLookupEnd); + aWriter.TimeProperty("connectStart", aTimings.connectStart); + aWriter.TimeProperty("tcpConnectEnd", aTimings.tcpConnectEnd); + aWriter.TimeProperty("secureConnectionStart", + aTimings.secureConnectionStart); + aWriter.TimeProperty("connectEnd", aTimings.connectEnd); + aWriter.TimeProperty("requestStart", aTimings.requestStart); + aWriter.TimeProperty("responseStart", aTimings.responseStart); + aWriter.TimeProperty("responseEnd", aTimings.responseEnd); + } + } + static MarkerSchema MarkerTypeDisplay() { + return MarkerSchema::SpecialFrontendLocation{}; + } + + private: + static Span<const char> GetNetworkState(NetworkLoadType aType) { + switch (aType) { + case NetworkLoadType::LOAD_START: + return MakeStringSpan("STATUS_START"); + case NetworkLoadType::LOAD_STOP: + return MakeStringSpan("STATUS_STOP"); + case NetworkLoadType::LOAD_REDIRECT: + return MakeStringSpan("STATUS_REDIRECT"); + default: + MOZ_ASSERT(false, "Unexpected NetworkLoadType enum value."); + return MakeStringSpan(""); + } + } + + static Span<const char> GetCacheState( + net::CacheDisposition aCacheDisposition) { + switch (aCacheDisposition) { + case net::kCacheUnresolved: + return MakeStringSpan("Unresolved"); + case net::kCacheHit: + return MakeStringSpan("Hit"); + case net::kCacheHitViaReval: + return MakeStringSpan("HitViaReval"); + case net::kCacheMissedViaReval: + return MakeStringSpan("MissedViaReval"); + case net::kCacheMissed: + return MakeStringSpan("Missed"); + case net::kCacheUnknown: + return MakeStringSpan(""); + default: + MOZ_ASSERT(false, "Unexpected CacheDisposition enum value."); + return MakeStringSpan(""); + } + } + }; + + profiler_add_marker( + name, geckoprofiler::category::NETWORK, + {MarkerTiming::Interval(aStart, aEnd), + MarkerStack::TakeBacktrace(std::move(aSource)), + MarkerInnerWindowId(aInnerWindowID)}, + NetworkMarker{}, aStart, aEnd, static_cast<int64_t>(aChannelId), spec, + aRequestMethod, aType, aPriority, aCount, aCacheDisposition, + aTimings ? *aTimings : scEmptyNetTimingStruct, redirect_spec, + aContentType ? ProfilerString8View(*aContentType) + : ProfilerString8View()); +} + +bool profiler_add_native_allocation_marker(int64_t aSize, + uintptr_t aMemoryAddress) { + if (!profiler_can_accept_markers()) { + return false; + } + + // Because native allocations may be intercepted anywhere, blocking while + // locking the profiler mutex here could end up causing a deadlock if another + // mutex is taken, which the profiler may indirectly need elsewhere. + // See bug 1642726 for such a scenario. 
+ // So instead we bail out if the mutex is already locked. Native allocations + // are statistically sampled anyway, so missing a few because of this is + // acceptable. + if (gPSMutex.IsLockedOnCurrentThread()) { + return false; + } + + struct NativeAllocationMarker { + static constexpr mozilla::Span<const char> MarkerTypeName() { + return mozilla::MakeStringSpan("Native allocation"); + } + static void StreamJSONMarkerData( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter, int64_t aSize, + uintptr_t aMemoryAddress, int aThreadId) { + aWriter.IntProperty("size", aSize); + aWriter.IntProperty("memoryAddress", + static_cast<int64_t>(aMemoryAddress)); + aWriter.IntProperty("threadId", aThreadId); + } + static mozilla::MarkerSchema MarkerTypeDisplay() { + return mozilla::MarkerSchema::SpecialFrontendLocation{}; + } + }; + + profiler_add_marker("Native allocation", geckoprofiler::category::OTHER, + {MarkerThreadId::MainThread(), MarkerStack::Capture()}, + NativeAllocationMarker{}, aSize, aMemoryAddress, + profiler_current_thread_id()); + return true; +} + +void profiler_set_js_context(JSContext* aCx) { + MOZ_ASSERT(aCx); + + PSAutoLock lock(gPSMutex); + + RegisteredThread* registeredThread = + TLSRegisteredThread::RegisteredThread(lock); + if (!registeredThread) { + return; + } + + registeredThread->SetJSContext(aCx); + + // This call is on-thread, so we can call PollJSSampling() to start JS + // sampling immediately. + registeredThread->PollJSSampling(); + + if (ActivePS::Exists(lock)) { + ProfiledThreadData* profiledThreadData = + ActivePS::GetProfiledThreadData(lock, registeredThread); + if (profiledThreadData) { + profiledThreadData->NotifyReceivedJSContext( + ActivePS::Buffer(lock).BufferRangeEnd()); + } + } +} + +void profiler_clear_js_context() { + MOZ_RELEASE_ASSERT(CorePS::Exists()); + + PSAutoLock lock(gPSMutex); + + RegisteredThread* registeredThread = + TLSRegisteredThread::RegisteredThread(lock); + if (!registeredThread) { + return; + } + + JSContext* cx = registeredThread->GetJSContext(); + if (!cx) { + return; + } + + if (ActivePS::Exists(lock) && ActivePS::FeatureJS(lock)) { + ProfiledThreadData* profiledThreadData = + ActivePS::GetProfiledThreadData(lock, registeredThread); + if (profiledThreadData) { + profiledThreadData->NotifyAboutToLoseJSContext( + cx, CorePS::ProcessStartTime(), ActivePS::Buffer(lock)); + + // Notify the JS context that profiling for this context has stopped. + // Do this by calling StopJSSampling and PollJSSampling before + // nulling out the JSContext. + registeredThread->StopJSSampling(); + registeredThread->PollJSSampling(); + + registeredThread->ClearJSContext(); + + // Tell the thread that we'd like to have JS sampling on this + // thread again, once it gets a new JSContext (if ever). + registeredThread->StartJSSampling(ActivePS::JSFlags(lock)); + return; + } + } + + registeredThread->ClearJSContext(); +} + +// NOTE: aCollector's methods will be called while the target thread is paused. +// Doing things in those methods like allocating -- which may try to claim +// locks -- is a surefire way to deadlock. 
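+// In particular, the profiler mutex is held for the whole of this function, so
+// the collector must not try to take it, directly or indirectly.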
+void profiler_suspend_and_sample_thread(int aThreadId, uint32_t aFeatures, + ProfilerStackCollector& aCollector, + bool aSampleNative /* = true */) { + // Lock the profiler mutex + PSAutoLock lock(gPSMutex); + + const Vector<UniquePtr<RegisteredThread>>& registeredThreads = + CorePS::RegisteredThreads(lock); + for (auto& thread : registeredThreads) { + RefPtr<ThreadInfo> info = thread->Info(); + RegisteredThread& registeredThread = *thread.get(); + + if (info->ThreadId() == aThreadId) { + if (info->IsMainThread()) { + aCollector.SetIsMainThread(); + } + + // Allocate the space for the native stack + NativeStack nativeStack; + + // Suspend, sample, and then resume the target thread. + Sampler sampler(lock); + TimeStamp now = TimeStamp::Now(); + sampler.SuspendAndSampleAndResumeThread( + lock, registeredThread, now, + [&](const Registers& aRegs, const TimeStamp& aNow) { + // The target thread is now suspended. Collect a native backtrace, + // and call the callback. + bool isSynchronous = false; +#if defined(HAVE_FASTINIT_NATIVE_UNWIND) + if (aSampleNative) { + // We can only use FramePointerStackWalk or MozStackWalk from + // suspend_and_sample_thread as other stackwalking methods may not be + // initialized. +# if defined(USE_FRAME_POINTER_STACK_WALK) + DoFramePointerBacktrace(lock, registeredThread, aRegs, + nativeStack); +# elif defined(USE_MOZ_STACK_WALK) + DoMozStackWalkBacktrace(lock, registeredThread, aRegs, + nativeStack); +# else +# error "Invalid configuration" +# endif + + MergeStacks(aFeatures, isSynchronous, registeredThread, aRegs, + nativeStack, aCollector, CorePS::JsFrames(lock)); + } else +#endif + { + MergeStacks(aFeatures, isSynchronous, registeredThread, aRegs, + nativeStack, aCollector, CorePS::JsFrames(lock)); + + if (ProfilerFeature::HasLeaf(aFeatures)) { + aCollector.CollectNativeLeafAddr((void*)aRegs.mPC); + } + } + }); + + // NOTE: Make sure to disable the sampler before it is destroyed, in case + // the profiler is running at the same time. + sampler.Disable(lock); + break; + } + } +} + +// END externally visible functions +//////////////////////////////////////////////////////////////////////// diff --git a/tools/profiler/core/platform.h b/tools/profiler/core/platform.h new file mode 100644 index 0000000000..a3a508d8b6 --- /dev/null +++ b/tools/profiler/core/platform.h @@ -0,0 +1,300 @@ +// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in +// the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google, Inc. nor the names of its contributors +// may be used to endorse or promote products derived from this +// software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. + +#ifndef TOOLS_PLATFORM_H_ +#define TOOLS_PLATFORM_H_ + +#include "PlatformMacros.h" + +#include "mozilla/Logging.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/ProfileBufferEntrySerialization.h" +#include "mozilla/TimeStamp.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/Vector.h" +#include "nsString.h" + +#include <cstddef> +#include <cstdint> +#include <functional> + +class ProfilerCodeAddressService; + +namespace mozilla { +struct SymbolTable; +} + +extern mozilla::LazyLogModule gProfilerLog; + +// These are for MOZ_LOG="prof:3" or higher. It's the default logging level for +// the profiler, and should be used sparingly. +#define LOG_TEST MOZ_LOG_TEST(gProfilerLog, mozilla::LogLevel::Info) +#define LOG(arg, ...) \ + MOZ_LOG(gProfilerLog, mozilla::LogLevel::Info, \ + ("[%d] " arg, profiler_current_process_id(), ##__VA_ARGS__)) + +// These are for MOZ_LOG="prof:4" or higher. It should be used for logging that +// is somewhat more verbose than LOG. +#define DEBUG_LOG_TEST MOZ_LOG_TEST(gProfilerLog, mozilla::LogLevel::Debug) +#define DEBUG_LOG(arg, ...) \ + MOZ_LOG(gProfilerLog, mozilla::LogLevel::Debug, \ + ("[%d] " arg, profiler_current_process_id(), ##__VA_ARGS__)) + +typedef uint8_t* Address; + +// ---------------------------------------------------------------------------- +// Miscellaneous + +class PlatformData; + +// We can't new/delete the type safely without defining it +// (-Wdelete-incomplete). Use these to hide the details from clients. +struct PlatformDataDestructor { + void operator()(PlatformData*); +}; + +typedef mozilla::UniquePtr<PlatformData, PlatformDataDestructor> + UniquePlatformData; +UniquePlatformData AllocPlatformData(int aThreadId); + +namespace mozilla { +class JSONWriter; +} +void AppendSharedLibraries(mozilla::JSONWriter& aWriter); + +// Convert the array of strings to a bitfield. +uint32_t ParseFeaturesFromStringArray(const char** aFeatures, + uint32_t aFeatureCount, + bool aIsStartup = false); + +void profiler_get_profile_json_into_lazily_allocated_buffer( + const std::function<char*(size_t)>& aAllocator, double aSinceTime, + bool aIsShuttingDown); + +// Flags to conveniently track various JS instrumentations. +enum class JSInstrumentationFlags { + StackSampling = 0x1, + TraceLogging = 0x2, + Allocations = 0x4, +}; + +// Record an exit profile from a child process. +void profiler_received_exit_profile(const nsCString& aExitProfile); + +// Write out the information of the active profiling configuration. +void profiler_write_active_configuration(mozilla::JSONWriter& aWriter); + +// Extract all received exit profiles that have not yet expired (i.e., they +// still intersect with this process' buffer range). +mozilla::Vector<nsCString> profiler_move_exit_profiles(); + +// If the "MOZ_PROFILER_SYMBOLICATE" env-var is set, we return a new +// ProfilerCodeAddressService object to use for local symbolication of profiles. +// This is off by default, and mainly intended for local development. 
+mozilla::UniquePtr<ProfilerCodeAddressService> +profiler_code_address_service_for_presymbolication(); + +extern "C" { +// This function is defined in the profiler rust module at +// tools/profiler/rust-helper. mozilla::SymbolTable and CompactSymbolTable +// have identical memory layout. +bool profiler_get_symbol_table(const char* debug_path, const char* breakpad_id, + mozilla::SymbolTable* symbol_table); + +bool profiler_demangle_rust(const char* mangled, char* buffer, size_t len); +} + +// For each running times value, call MACRO(index, name, unit, jsonProperty) +#define PROFILER_FOR_EACH_RUNNING_TIME(MACRO) \ + MACRO(0, ThreadCPU, Delta, threadCPUDelta) + +// This class contains all "running times" such as CPU usage measurements. +// All measurements are listed in `PROFILER_FOR_EACH_RUNNING_TIME` above. +// Each measurement is optional and only takes a value when explicitly set. +// Two RunningTimes object may be subtracted, to get the difference between +// known values. +class RunningTimes { + public: + constexpr RunningTimes() = default; + + // Constructor with only a timestamp, useful when no measurements will be + // taken. + constexpr explicit RunningTimes(const mozilla::TimeStamp& aTimeStamp) + : mPostMeasurementTimeStamp(aTimeStamp) {} + + constexpr void Clear() { *this = RunningTimes{}; } + + constexpr bool IsEmpty() const { return mKnownBits == 0; } + + // This should be called right after CPU measurements have been taken. + void SetPostMeasurementTimeStamp(const mozilla::TimeStamp& aTimeStamp) { + mPostMeasurementTimeStamp = aTimeStamp; + } + + const mozilla::TimeStamp& PostMeasurementTimeStamp() const { + return mPostMeasurementTimeStamp; + } + + // Should be filled for any registered thread. + +#define RUNNING_TIME_MEMBER(index, name, unit, jsonProperty) \ + constexpr bool Is##name##unit##Known() const { \ + return (mKnownBits & mGot##name##unit) != 0; \ + } \ + \ + constexpr void Clear##name##unit() { \ + m##name##unit = 0; \ + mKnownBits &= ~mGot##name##unit; \ + } \ + \ + constexpr void Reset##name##unit(uint64_t a##name##unit) { \ + m##name##unit = a##name##unit; \ + mKnownBits |= mGot##name##unit; \ + } \ + \ + constexpr void Set##name##unit(uint64_t a##name##unit) { \ + MOZ_ASSERT(!Is##name##unit##Known(), #name #unit " already set"); \ + Reset##name##unit(a##name##unit); \ + } \ + \ + constexpr mozilla::Maybe<uint64_t> Get##name##unit() const { \ + if (Is##name##unit##Known()) { \ + return mozilla::Some(m##name##unit); \ + } \ + return mozilla::Nothing{}; \ + } + + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_MEMBER) + +#undef RUNNING_TIME_MEMBER + + // Take values from another RunningTimes. + RunningTimes& TakeFrom(RunningTimes& aOther) { + if (!aOther.IsEmpty()) { +#define RUNNING_TIME_TAKE(index, name, unit, jsonProperty) \ + if (aOther.Is##name##unit##Known()) { \ + Set##name##unit(std::exchange(aOther.m##name##unit, 0)); \ + } + + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_TAKE) + +#undef RUNNING_TIME_TAKE + + aOther.mKnownBits = 0; + } + return *this; + } + + // Difference from `aBefore` to `this`. Any unknown makes the result unknown. + // PostMeasurementTimeStamp set to `this` PostMeasurementTimeStamp, to keep + // the most recent timestamp associated with the end of the interval over + // which the difference applies. 
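+  // For example, subtracting the RunningTimes captured at the previous sample
+  // from the current one gives the CPU time used during that sampling interval.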
+ RunningTimes operator-(const RunningTimes& aBefore) const { + RunningTimes diff; + diff.mPostMeasurementTimeStamp = mPostMeasurementTimeStamp; +#define RUNNING_TIME_SUB(index, name, unit, jsonProperty) \ + if (Is##name##unit##Known() && aBefore.Is##name##unit##Known()) { \ + diff.Set##name##unit(m##name##unit - aBefore.m##name##unit); \ + } + + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_SUB) + +#undef RUNNING_TIME_SUB + return diff; + } + + private: + friend mozilla::ProfileBufferEntryWriter::Serializer<RunningTimes>; + friend mozilla::ProfileBufferEntryReader::Deserializer<RunningTimes>; + + mozilla::TimeStamp mPostMeasurementTimeStamp; + + uint32_t mKnownBits = 0u; + +#define RUNNING_TIME_MEMBER(index, name, unit, jsonProperty) \ + static constexpr uint32_t mGot##name##unit = 1u << index; \ + uint64_t m##name##unit = 0; + + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_MEMBER) + +#undef RUNNING_TIME_MEMBER +}; + +template <> +struct mozilla::ProfileBufferEntryWriter::Serializer<RunningTimes> { + static Length Bytes(const RunningTimes& aRunningTimes) { + return ULEB128Size(aRunningTimes.mKnownBits) + + mozilla::CountPopulation32(aRunningTimes.mKnownBits) * + sizeof(uint64_t); + } + + static void Write(ProfileBufferEntryWriter& aEW, + const RunningTimes& aRunningTimes) { + aEW.WriteULEB128(aRunningTimes.mKnownBits); + +#define RUNNING_TIME_SERIALIZE(index, name, unit, jsonProperty) \ + if (aRunningTimes.Is##name##unit##Known()) { \ + aEW.WriteObject(aRunningTimes.m##name##unit); \ + } + + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_SERIALIZE) + +#undef RUNNING_TIME_SERIALIZE + } +}; + +template <> +struct mozilla::ProfileBufferEntryReader::Deserializer<RunningTimes> { + static void ReadInto(ProfileBufferEntryReader& aER, + RunningTimes& aRunningTimes) { + aRunningTimes = Read(aER); + } + + static RunningTimes Read(ProfileBufferEntryReader& aER) { + // Start with empty running times, everything is cleared. + RunningTimes times; + + // This sets all the bits into mKnownBits, we don't need to modify it + // further. + times.mKnownBits = aER.ReadULEB128<uint32_t>(); + + // For each member that should be known, read its value. +#define RUNNING_TIME_DESERIALIZE(index, name, unit, jsonProperty) \ + if (times.Is##name##unit##Known()) { \ + aER.ReadIntoObject(times.m##name##unit); \ + } + + PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_DESERIALIZE) + +#undef RUNNING_TIME_DESERIALIZE + + return times; + } +}; + +#endif /* ndef TOOLS_PLATFORM_H_ */ diff --git a/tools/profiler/core/shared-libraries-linux.cc b/tools/profiler/core/shared-libraries-linux.cc new file mode 100644 index 0000000000..d91c78f632 --- /dev/null +++ b/tools/profiler/core/shared-libraries-linux.cc @@ -0,0 +1,253 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "shared-libraries.h" + +#define PATH_MAX_TOSTRING(x) #x +#define PATH_MAX_STRING(x) PATH_MAX_TOSTRING(x) +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <limits.h> +#include <unistd.h> +#include <fstream> +#include "platform.h" +#include "shared-libraries.h" +#include "GeckoProfiler.h" +#include "mozilla/Sprintf.h" +#include "mozilla/Unused.h" +#include "nsDebug.h" +#include "nsNativeCharsetUtils.h" +#include <nsTArray.h> + +#include "common/linux/file_id.h" +#include <algorithm> +#include <dlfcn.h> +#if defined(GP_OS_linux) || defined(GP_OS_android) +# include <features.h> +#endif +#include <sys/types.h> + +#if defined(GP_OS_linux) || defined(GP_OS_android) || defined(GP_OS_freebsd) +# include <link.h> // dl_phdr_info +#else +# error "Unexpected configuration" +#endif + +#if defined(GP_OS_android) +extern "C" MOZ_EXPORT __attribute__((weak)) int dl_iterate_phdr( + int (*callback)(struct dl_phdr_info* info, size_t size, void* data), + void* data); +#endif + +struct LoadedLibraryInfo { + LoadedLibraryInfo(const char* aName, unsigned long aBaseAddress, + unsigned long aFirstMappingStart, + unsigned long aLastMappingEnd) + : mName(aName), + mBaseAddress(aBaseAddress), + mFirstMappingStart(aFirstMappingStart), + mLastMappingEnd(aLastMappingEnd) {} + + nsCString mName; + unsigned long mBaseAddress; + unsigned long mFirstMappingStart; + unsigned long mLastMappingEnd; +}; + +static nsCString IDtoUUIDString( + const google_breakpad::wasteful_vector<uint8_t>& aIdentifier) { + using namespace google_breakpad; + + nsCString uuid; + const std::string str = FileID::ConvertIdentifierToUUIDString(aIdentifier); + uuid.Append(str.c_str(), str.size()); + // This is '0', not '\0', since it represents the breakpad id age. + uuid.Append('0'); + return uuid; +} + +// Get the breakpad Id for the binary file pointed by bin_name +static nsCString getId(const char* bin_name) { + using namespace google_breakpad; + + PageAllocator allocator; + auto_wasteful_vector<uint8_t, kDefaultBuildIdSize> identifier(&allocator); + + FileID file_id(bin_name); + if (file_id.ElfFileIdentifier(identifier)) { + return IDtoUUIDString(identifier); + } + + return ""_ns; +} + +static SharedLibrary SharedLibraryAtPath(const char* path, + unsigned long libStart, + unsigned long libEnd, + unsigned long offset = 0) { + nsAutoString pathStr; + mozilla::Unused << NS_WARN_IF( + NS_FAILED(NS_CopyNativeToUnicode(nsDependentCString(path), pathStr))); + + nsAutoString nameStr = pathStr; + int32_t pos = nameStr.RFindChar('/'); + if (pos != kNotFound) { + nameStr.Cut(0, pos + 1); + } + + return SharedLibrary(libStart, libEnd, offset, getId(path), nameStr, pathStr, + nameStr, pathStr, ""_ns, ""); +} + +static int dl_iterate_callback(struct dl_phdr_info* dl_info, size_t size, + void* data) { + auto libInfoList = reinterpret_cast<nsTArray<LoadedLibraryInfo>*>(data); + + if (dl_info->dlpi_phnum <= 0) return 0; + + unsigned long baseAddress = dl_info->dlpi_addr; + unsigned long firstMappingStart = -1; + unsigned long lastMappingEnd = 0; + + for (size_t i = 0; i < dl_info->dlpi_phnum; i++) { + if (dl_info->dlpi_phdr[i].p_type != PT_LOAD) { + continue; + } + unsigned long start = dl_info->dlpi_addr + dl_info->dlpi_phdr[i].p_vaddr; + unsigned long end = start + dl_info->dlpi_phdr[i].p_memsz; + if (start < firstMappingStart) { + firstMappingStart = start; + } + if (end > lastMappingEnd) { + lastMappingEnd = end; + } + } + + libInfoList->AppendElement(LoadedLibraryInfo( + dl_info->dlpi_name, baseAddress, 
firstMappingStart, lastMappingEnd)); + + return 0; +} + +SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() { + SharedLibraryInfo info; + +#if defined(GP_OS_linux) + // We need to find the name of the executable (exeName, exeNameLen) and the + // address of its executable section (exeExeAddr) in the running image. + char exeName[PATH_MAX]; + memset(exeName, 0, sizeof(exeName)); + + ssize_t exeNameLen = readlink("/proc/self/exe", exeName, sizeof(exeName) - 1); + if (exeNameLen == -1) { + // readlink failed for whatever reason. Note this, but keep going. + exeName[0] = '\0'; + exeNameLen = 0; + LOG("SharedLibraryInfo::GetInfoForSelf(): readlink failed"); + } else { + // Assert no buffer overflow. + MOZ_RELEASE_ASSERT(exeNameLen >= 0 && + exeNameLen < static_cast<ssize_t>(sizeof(exeName))); + } + + unsigned long exeExeAddr = 0; +#endif + +#if defined(GP_OS_android) + // If dl_iterate_phdr doesn't exist, we give up immediately. + if (!dl_iterate_phdr) { + // On ARM Android, dl_iterate_phdr is provided by the custom linker. + // So if libxul was loaded by the system linker (e.g. as part of + // xpcshell when running tests), it won't be available and we should + // not call it. + return info; + } +#endif + +#if defined(GP_OS_linux) || defined(GP_OS_android) + // Read info from /proc/self/maps. We ignore most of it. + pid_t pid = profiler_current_process_id(); + char path[PATH_MAX]; + SprintfLiteral(path, "/proc/%d/maps", pid); + std::ifstream maps(path); + std::string line; + while (std::getline(maps, line)) { + int ret; + unsigned long start; + unsigned long end; + char perm[6 + 1] = ""; + unsigned long offset; + char modulePath[PATH_MAX + 1] = ""; + ret = sscanf(line.c_str(), + "%lx-%lx %6s %lx %*s %*x %" PATH_MAX_STRING(PATH_MAX) "s\n", + &start, &end, perm, &offset, modulePath); + if (!strchr(perm, 'x')) { + // Ignore non executable entries + continue; + } + if (ret != 5 && ret != 4) { + LOG("SharedLibraryInfo::GetInfoForSelf(): " + "reading /proc/self/maps failed"); + continue; + } + +# if defined(GP_OS_linux) + // Try to establish the main executable's load address. + if (exeNameLen > 0 && strcmp(modulePath, exeName) == 0) { + exeExeAddr = start; + } +# elif defined(GP_OS_android) + // Use /proc/pid/maps to get the dalvik-jit section since it has no + // associated phdrs. + if (0 == strcmp(modulePath, "/dev/ashmem/dalvik-jit-code-cache")) { + info.AddSharedLibrary( + SharedLibraryAtPath(modulePath, start, end, offset)); + if (info.GetSize() > 10000) { + LOG("SharedLibraryInfo::GetInfoForSelf(): " + "implausibly large number of mappings acquired"); + break; + } + } +# endif + } +#endif + + nsTArray<LoadedLibraryInfo> libInfoList; + + // We collect the bulk of the library info using dl_iterate_phdr. + dl_iterate_phdr(dl_iterate_callback, &libInfoList); + + for (const auto& libInfo : libInfoList) { + info.AddSharedLibrary( + SharedLibraryAtPath(libInfo.mName.get(), libInfo.mFirstMappingStart, + libInfo.mLastMappingEnd, + libInfo.mFirstMappingStart - libInfo.mBaseAddress)); + } + +#if defined(GP_OS_linux) + // Make another pass over the information we just harvested from + // dl_iterate_phdr. If we see a nameless object mapped at what we earlier + // established to be the main executable's load address, attach the + // executable's name to that entry. 
+ for (size_t i = 0; i < info.GetSize(); i++) { + SharedLibrary& lib = info.GetMutableEntry(i); + if (lib.GetStart() <= exeExeAddr && exeExeAddr <= lib.GetEnd() && + lib.GetNativeDebugPath().empty()) { + lib = SharedLibraryAtPath(exeName, lib.GetStart(), lib.GetEnd(), + lib.GetOffset()); + + // We only expect to see one such entry. + break; + } + } +#endif + + return info; +} + +void SharedLibraryInfo::Initialize() { /* do nothing */ +} diff --git a/tools/profiler/core/shared-libraries-macos.cc b/tools/profiler/core/shared-libraries-macos.cc new file mode 100644 index 0000000000..eb2704ea08 --- /dev/null +++ b/tools/profiler/core/shared-libraries-macos.cc @@ -0,0 +1,185 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "shared-libraries.h" + +#include "ClearOnShutdown.h" +#include "mozilla/StaticMutex.h" +#include "mozilla/Unused.h" +#include "nsNativeCharsetUtils.h" +#include <AvailabilityMacros.h> + +#include <dlfcn.h> +#include <mach-o/arch.h> +#include <mach-o/dyld_images.h> +#include <mach-o/dyld.h> +#include <mach-o/loader.h> +#include <mach/mach_init.h> +#include <mach/mach_traps.h> +#include <mach/task_info.h> +#include <mach/task.h> +#include <sstream> +#include <stdlib.h> +#include <string.h> +#include <vector> + +// Architecture specific abstraction. +#if defined(GP_ARCH_x86) +typedef mach_header platform_mach_header; +typedef segment_command mach_segment_command_type; +# define MACHO_MAGIC_NUMBER MH_MAGIC +# define CMD_SEGMENT LC_SEGMENT +# define seg_size uint32_t +#else +typedef mach_header_64 platform_mach_header; +typedef segment_command_64 mach_segment_command_type; +# define MACHO_MAGIC_NUMBER MH_MAGIC_64 +# define CMD_SEGMENT LC_SEGMENT_64 +# define seg_size uint64_t +#endif + +struct NativeSharedLibrary { + const platform_mach_header* header; + std::string path; +}; +static std::vector<NativeSharedLibrary>* sSharedLibrariesList = nullptr; +static mozilla::StaticMutex sSharedLibrariesMutex; + +static void SharedLibraryAddImage(const struct mach_header* mh, + intptr_t vmaddr_slide) { + // NOTE: Presumably for backwards-compatibility reasons, this function accepts + // a mach_header even on 64-bit where it ought to be a mach_header_64. We cast + // it to the right type here. + auto header = reinterpret_cast<const platform_mach_header*>(mh); + + Dl_info info; + if (!dladdr(header, &info)) { + return; + } + + mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex); + if (!sSharedLibrariesList) { + return; + } + + NativeSharedLibrary lib = {header, info.dli_fname}; + sSharedLibrariesList->push_back(lib); +} + +static void SharedLibraryRemoveImage(const struct mach_header* mh, + intptr_t vmaddr_slide) { + // NOTE: Presumably for backwards-compatibility reasons, this function accepts + // a mach_header even on 64-bit where it ought to be a mach_header_64. We cast + // it to the right type here. 
+ auto header = reinterpret_cast<const platform_mach_header*>(mh); + + mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex); + if (!sSharedLibrariesList) { + return; + } + + uint32_t count = sSharedLibrariesList->size(); + for (uint32_t i = 0; i < count; ++i) { + if ((*sSharedLibrariesList)[i].header == header) { + sSharedLibrariesList->erase(sSharedLibrariesList->begin() + i); + return; + } + } +} + +void SharedLibraryInfo::Initialize() { + // NOTE: We intentionally leak this memory here. We're allocating dynamically + // in order to avoid static initializers. + sSharedLibrariesList = new std::vector<NativeSharedLibrary>(); + + _dyld_register_func_for_add_image(SharedLibraryAddImage); + _dyld_register_func_for_remove_image(SharedLibraryRemoveImage); +} + +static void addSharedLibrary(const platform_mach_header* header, + const char* path, SharedLibraryInfo& info) { + const struct load_command* cmd = + reinterpret_cast<const struct load_command*>(header + 1); + + seg_size size = 0; + unsigned long long start = reinterpret_cast<unsigned long long>(header); + // Find the cmd segment in the macho image. It will contain the offset we care + // about. + const uint8_t* uuid_bytes = nullptr; + for (unsigned int i = 0; + cmd && (i < header->ncmds) && (uuid_bytes == nullptr || size == 0); + ++i) { + if (cmd->cmd == CMD_SEGMENT) { + const mach_segment_command_type* seg = + reinterpret_cast<const mach_segment_command_type*>(cmd); + + if (!strcmp(seg->segname, "__TEXT")) { + size = seg->vmsize; + } + } else if (cmd->cmd == LC_UUID) { + const uuid_command* ucmd = reinterpret_cast<const uuid_command*>(cmd); + uuid_bytes = ucmd->uuid; + } + + cmd = reinterpret_cast<const struct load_command*>( + reinterpret_cast<const char*>(cmd) + cmd->cmdsize); + } + + nsAutoCString uuid; + if (uuid_bytes != nullptr) { + uuid.AppendPrintf( + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "%02X" + "0" /* breakpad id age */, + uuid_bytes[0], uuid_bytes[1], uuid_bytes[2], uuid_bytes[3], + uuid_bytes[4], uuid_bytes[5], uuid_bytes[6], uuid_bytes[7], + uuid_bytes[8], uuid_bytes[9], uuid_bytes[10], uuid_bytes[11], + uuid_bytes[12], uuid_bytes[13], uuid_bytes[14], uuid_bytes[15]); + } + + nsAutoString pathStr; + mozilla::Unused << NS_WARN_IF( + NS_FAILED(NS_CopyNativeToUnicode(nsDependentCString(path), pathStr))); + + nsAutoString nameStr = pathStr; + int32_t pos = nameStr.RFindChar('/'); + if (pos != kNotFound) { + nameStr.Cut(0, pos + 1); + } + + const NXArchInfo* archInfo = + NXGetArchInfoFromCpuType(header->cputype, header->cpusubtype); + + info.AddSharedLibrary(SharedLibrary(start, start + size, 0, uuid, nameStr, + pathStr, nameStr, pathStr, ""_ns, + archInfo ? archInfo->name : "")); +} + +// Translate the statically stored sSharedLibrariesList information into a +// SharedLibraryInfo object. 
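+// The list itself is maintained by the add/remove-image callbacks registered in
+// SharedLibraryInfo::Initialize() above.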
+SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() { + mozilla::StaticMutexAutoLock lock(sSharedLibrariesMutex); + SharedLibraryInfo sharedLibraryInfo; + + for (auto& info : *sSharedLibrariesList) { + addSharedLibrary(info.header, info.path.c_str(), sharedLibraryInfo); + } + + return sharedLibraryInfo; +} diff --git a/tools/profiler/core/shared-libraries-win32.cc b/tools/profiler/core/shared-libraries-win32.cc new file mode 100644 index 0000000000..e209e4328f --- /dev/null +++ b/tools/profiler/core/shared-libraries-win32.cc @@ -0,0 +1,246 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include <windows.h> +#include <dbghelp.h> +#include <sstream> +#include <psapi.h> + +#include "shared-libraries.h" +#include "nsWindowsHelpers.h" +#include "mozilla/UniquePtr.h" +#include "mozilla/Unused.h" +#include "mozilla/WindowsProcessMitigations.h" +#include "mozilla/WindowsVersion.h" +#include "nsNativeCharsetUtils.h" +#include "nsPrintfCString.h" +#include "nsReadableUtils.h" + +#define CV_SIGNATURE 0x53445352 // 'SDSR' + +struct CodeViewRecord70 { + uint32_t signature; + GUID pdbSignature; + uint32_t pdbAge; + // A UTF-8 string, according to + // https://github.com/Microsoft/microsoft-pdb/blob/082c5290e5aff028ae84e43affa8be717aa7af73/PDB/dbi/locator.cpp#L785 + char pdbFileName[1]; +}; + +static bool GetPdbInfo(uintptr_t aStart, nsID& aSignature, uint32_t& aAge, + char** aPdbName) { + if (!aStart) { + return false; + } + + PIMAGE_DOS_HEADER dosHeader = reinterpret_cast<PIMAGE_DOS_HEADER>(aStart); + if (dosHeader->e_magic != IMAGE_DOS_SIGNATURE) { + return false; + } + + PIMAGE_NT_HEADERS ntHeaders = + reinterpret_cast<PIMAGE_NT_HEADERS>(aStart + dosHeader->e_lfanew); + if (ntHeaders->Signature != IMAGE_NT_SIGNATURE) { + return false; + } + + uint32_t relativeVirtualAddress = + ntHeaders->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG] + .VirtualAddress; + if (!relativeVirtualAddress) { + return false; + } + + PIMAGE_DEBUG_DIRECTORY debugDirectory = + reinterpret_cast<PIMAGE_DEBUG_DIRECTORY>(aStart + relativeVirtualAddress); + if (!debugDirectory || debugDirectory->Type != IMAGE_DEBUG_TYPE_CODEVIEW) { + return false; + } + + CodeViewRecord70* debugInfo = reinterpret_cast<CodeViewRecord70*>( + aStart + debugDirectory->AddressOfRawData); + if (!debugInfo || debugInfo->signature != CV_SIGNATURE) { + return false; + } + + aAge = debugInfo->pdbAge; + GUID& pdbSignature = debugInfo->pdbSignature; + aSignature.m0 = pdbSignature.Data1; + aSignature.m1 = pdbSignature.Data2; + aSignature.m2 = pdbSignature.Data3; + memcpy(aSignature.m3, pdbSignature.Data4, sizeof(pdbSignature.Data4)); + + // The PDB file name could be different from module filename, so report both + // e.g. 
The PDB for C:\Windows\SysWOW64\ntdll.dll is wntdll.pdb + *aPdbName = debugInfo->pdbFileName; + + return true; +} + +static nsCString GetVersion(WCHAR* dllPath) { + DWORD infoSize = GetFileVersionInfoSizeW(dllPath, nullptr); + if (infoSize == 0) { + return ""_ns; + } + + mozilla::UniquePtr<unsigned char[]> infoData = + mozilla::MakeUnique<unsigned char[]>(infoSize); + if (!GetFileVersionInfoW(dllPath, 0, infoSize, infoData.get())) { + return ""_ns; + } + + VS_FIXEDFILEINFO* vInfo; + UINT vInfoLen; + if (!VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen)) { + return ""_ns; + } + if (!vInfo) { + return ""_ns; + } + + nsPrintfCString version("%d.%d.%d.%d", vInfo->dwFileVersionMS >> 16, + vInfo->dwFileVersionMS & 0xFFFF, + vInfo->dwFileVersionLS >> 16, + vInfo->dwFileVersionLS & 0xFFFF); + return std::move(version); +} + +SharedLibraryInfo SharedLibraryInfo::GetInfoForSelf() { + SharedLibraryInfo sharedLibraryInfo; + + HANDLE hProcess = GetCurrentProcess(); + mozilla::UniquePtr<HMODULE[]> hMods; + size_t modulesNum = 0; + if (hProcess != NULL) { + DWORD modulesSize; + if (!EnumProcessModules(hProcess, nullptr, 0, &modulesSize)) { + return sharedLibraryInfo; + } + modulesNum = modulesSize / sizeof(HMODULE); + hMods = mozilla::MakeUnique<HMODULE[]>(modulesNum); + if (!EnumProcessModules(hProcess, hMods.get(), modulesNum * sizeof(HMODULE), + &modulesSize)) { + return sharedLibraryInfo; + } + // The list may have shrunk between calls + if (modulesSize / sizeof(HMODULE) < modulesNum) { + modulesNum = modulesSize / sizeof(HMODULE); + } + } + + for (unsigned int i = 0; i < modulesNum; i++) { + WCHAR modulePath[MAX_PATH + 1]; + if (!GetModuleFileNameEx(hProcess, hMods[i], modulePath, + sizeof(modulePath) / sizeof(WCHAR))) { + continue; + } + + MODULEINFO module = {0}; + if (!GetModuleInformation(hProcess, hMods[i], &module, + sizeof(MODULEINFO))) { + continue; + } + + nsAutoString modulePathStr(modulePath); + nsAutoString moduleNameStr = modulePathStr; + int32_t pos = moduleNameStr.RFindCharInSet(u"\\/"); + if (pos != kNotFound) { + moduleNameStr.Cut(0, pos + 1); + } + + // Hackaround for Bug 1607574. Nvidia's shim driver nvd3d9wrap[x].dll + // detours LoadLibraryExW when it's loaded and the detour function causes + // AV when the code tries to access data pointing to an address within + // unloaded nvinit[x].dll. + // The crashing code is executed when a given parameter is "detoured.dll" + // and OS version is older than 6.2. We hit that crash at the following + // call to LoadLibraryEx even if we specify LOAD_LIBRARY_AS_DATAFILE. + // We work around it by skipping LoadLibraryEx, and add a library info with + // a dummy breakpad id instead. 
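+    // (The workaround only applies to x86/x64 builds; see the #if below.)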
+#if !defined(_M_ARM64) +# if defined(_M_AMD64) + LPCWSTR kNvidiaShimDriver = L"nvd3d9wrapx.dll"; + LPCWSTR kNvidiaInitDriver = L"nvinitx.dll"; +# elif defined(_M_IX86) + LPCWSTR kNvidiaShimDriver = L"nvd3d9wrap.dll"; + LPCWSTR kNvidiaInitDriver = L"nvinit.dll"; +# endif + if (moduleNameStr.LowerCaseEqualsLiteral("detoured.dll") && + !mozilla::IsWin8OrLater() && ::GetModuleHandle(kNvidiaShimDriver) && + !::GetModuleHandle(kNvidiaInitDriver)) { + constexpr auto pdbNameStr = u"detoured.pdb"_ns; + SharedLibrary shlib((uintptr_t)module.lpBaseOfDll, + (uintptr_t)module.lpBaseOfDll + module.SizeOfImage, + 0, // DLLs are always mapped at offset 0 on Windows + "000000000000000000000000000000000"_ns, moduleNameStr, + modulePathStr, pdbNameStr, pdbNameStr, ""_ns, ""); + sharedLibraryInfo.AddSharedLibrary(shlib); + continue; + } +#endif // !defined(_M_ARM64) + + // If EAF+ is enabled, parsing ntdll's PE header via GetPdbInfo() causes + // a crash. We don't include PDB information in SharedLibrary. + bool canGetPdbInfo = (!mozilla::IsEafPlusEnabled() || + !moduleNameStr.LowerCaseEqualsLiteral("ntdll.dll")); + + nsCString breakpadId; + // Load the module again to make sure that its handle will remain + // valid as we attempt to read the PDB information from it. We load the + // DLL as a datafile so that if the module actually gets unloaded between + // the call to EnumProcessModules and the following LoadLibraryEx, we don't + // end up running the now newly loaded module's DllMain function. If the + // module is already loaded, LoadLibraryEx just increments its refcount. + // + // Note that because of the race condition above, merely loading the DLL + // again is not safe enough, therefore we also need to make sure that we + // can read the memory mapped at the base address before we can safely + // proceed to actually access those pages. 
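+    // The VirtualQuery() check below confirms the pages at the module base are
+    // committed before GetPdbInfo() reads them.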
+ HMODULE handleLock = + LoadLibraryEx(modulePath, NULL, LOAD_LIBRARY_AS_DATAFILE); + MEMORY_BASIC_INFORMATION vmemInfo = {0}; + nsID pdbSig; + uint32_t pdbAge; + nsAutoString pdbPathStr; + nsAutoString pdbNameStr; + char* pdbName = nullptr; + if (handleLock && + sizeof(vmemInfo) == + VirtualQuery(module.lpBaseOfDll, &vmemInfo, sizeof(vmemInfo)) && + vmemInfo.State == MEM_COMMIT && canGetPdbInfo && + GetPdbInfo((uintptr_t)module.lpBaseOfDll, pdbSig, pdbAge, &pdbName)) { + MOZ_ASSERT(breakpadId.IsEmpty()); + breakpadId.AppendPrintf( + "%08X" // m0 + "%04X%04X" // m1,m2 + "%02X%02X%02X%02X%02X%02X%02X%02X" // m3 + "%X", // pdbAge + pdbSig.m0, pdbSig.m1, pdbSig.m2, pdbSig.m3[0], pdbSig.m3[1], + pdbSig.m3[2], pdbSig.m3[3], pdbSig.m3[4], pdbSig.m3[5], pdbSig.m3[6], + pdbSig.m3[7], pdbAge); + + pdbPathStr = NS_ConvertUTF8toUTF16(pdbName); + pdbNameStr = pdbPathStr; + int32_t pos = pdbNameStr.RFindCharInSet(u"\\/"); + if (pos != kNotFound) { + pdbNameStr.Cut(0, pos + 1); + } + } + + SharedLibrary shlib((uintptr_t)module.lpBaseOfDll, + (uintptr_t)module.lpBaseOfDll + module.SizeOfImage, + 0, // DLLs are always mapped at offset 0 on Windows + breakpadId, moduleNameStr, modulePathStr, pdbNameStr, + pdbPathStr, GetVersion(modulePath), ""); + sharedLibraryInfo.AddSharedLibrary(shlib); + + FreeLibrary(handleLock); // ok to free null handles + } + + return sharedLibraryInfo; +} + +void SharedLibraryInfo::Initialize() { /* do nothing */ +} diff --git a/tools/profiler/core/vtune/ittnotify.h b/tools/profiler/core/vtune/ittnotify.h new file mode 100644 index 0000000000..f1d65b3328 --- /dev/null +++ b/tools/profiler/core/vtune/ittnotify.h @@ -0,0 +1,4123 @@ +/* <copyright> + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (c) 2005-2014 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + Contact Information: + http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/ + + BSD LICENSE + + Copyright (c) 2005-2014 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +</copyright> */ +#ifndef _ITTNOTIFY_H_ +#define _ITTNOTIFY_H_ + +/** +@file +@brief Public User API functions and types +@mainpage + +The ITT API is used to annotate a user's program with additional information +that can be used by correctness and performance tools. The user inserts +calls in their program. Those calls generate information that is collected +at runtime, and used by Intel(R) Threading Tools. + +@section API Concepts +The following general concepts are used throughout the API. + +@subsection Unicode Support +Many API functions take character string arguments. On Windows, there +are two versions of each such function. The function name is suffixed +by W if Unicode support is enabled, and by A otherwise. Any API function +that takes a character string argument adheres to this convention. + +@subsection Conditional Compilation +Many users prefer having an option to modify ITT API code when linking it +inside their runtimes. ITT API header file provides a mechanism to replace +ITT API function names inside your code with empty strings. To do this, +define the macros INTEL_NO_ITTNOTIFY_API during compilation and remove the +static library from the linker script. + +@subsection Domains +[see domains] +Domains provide a way to separate notification for different modules or +libraries in a program. Domains are specified by dotted character strings, +e.g. TBB.Internal.Control. + +A mechanism (to be specified) is provided to enable and disable +domains. By default, all domains are enabled. +@subsection Named Entities and Instances +Named entities (frames, regions, tasks, and markers) communicate +information about the program to the analysis tools. A named entity often +refers to a section of program code, or to some set of logical concepts +that the programmer wants to group together. + +Named entities relate to the programmer's static view of the program. When +the program actually executes, many instances of a given named entity +may be created. + +The API annotations denote instances of named entities. The actual +named entities are displayed using the analysis tools. In other words, +the named entities come into existence when instances are created. + +Instances of named entities may have instance identifiers (IDs). Some +API calls use instance identifiers to create relationships between +different instances of named entities. Other API calls associate data +with instances of named entities. + +Some named entities must always have instance IDs. In particular, regions +and frames always have IDs. Task and markers need IDs only if the ID is +needed in another API call (such as adding a relation or metadata). + +The lifetime of instance IDs is distinct from the lifetime of +instances. 
This allows various relationships to be specified separate +from the actual execution of instances. This flexibility comes at the +expense of extra API calls. + +The same ID may not be reused for different instances, unless a previous +[ref] __itt_id_destroy call for that ID has been issued. +*/ + +/** @cond exclude_from_documentation */ +#ifndef ITT_OS_WIN +# define ITT_OS_WIN 1 +#endif /* ITT_OS_WIN */ + +#ifndef ITT_OS_LINUX +# define ITT_OS_LINUX 2 +#endif /* ITT_OS_LINUX */ + +#ifndef ITT_OS_MAC +# define ITT_OS_MAC 3 +#endif /* ITT_OS_MAC */ + +#ifndef ITT_OS_FREEBSD +# define ITT_OS_FREEBSD 4 +#endif /* ITT_OS_FREEBSD */ + +#ifndef ITT_OS +# if defined WIN32 || defined _WIN32 +# define ITT_OS ITT_OS_WIN +# elif defined( __APPLE__ ) && defined( __MACH__ ) +# define ITT_OS ITT_OS_MAC +# elif defined( __FreeBSD__ ) +# define ITT_OS ITT_OS_FREEBSD +# else +# define ITT_OS ITT_OS_LINUX +# endif +#endif /* ITT_OS */ + +#ifndef ITT_PLATFORM_WIN +# define ITT_PLATFORM_WIN 1 +#endif /* ITT_PLATFORM_WIN */ + +#ifndef ITT_PLATFORM_POSIX +# define ITT_PLATFORM_POSIX 2 +#endif /* ITT_PLATFORM_POSIX */ + +#ifndef ITT_PLATFORM_MAC +# define ITT_PLATFORM_MAC 3 +#endif /* ITT_PLATFORM_MAC */ + +#ifndef ITT_PLATFORM_FREEBSD +# define ITT_PLATFORM_FREEBSD 4 +#endif /* ITT_PLATFORM_FREEBSD */ + +#ifndef ITT_PLATFORM +# if ITT_OS==ITT_OS_WIN +# define ITT_PLATFORM ITT_PLATFORM_WIN +# elif ITT_OS==ITT_OS_MAC +# define ITT_PLATFORM ITT_PLATFORM_MAC +# elif ITT_OS==ITT_OS_FREEBSD +# define ITT_PLATFORM ITT_PLATFORM_FREEBSD +# else +# define ITT_PLATFORM ITT_PLATFORM_POSIX +# endif +#endif /* ITT_PLATFORM */ + +#if defined(_UNICODE) && !defined(UNICODE) +#define UNICODE +#endif + +#include <stddef.h> +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#include <tchar.h> +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#include <stdint.h> +#if defined(UNICODE) || defined(_UNICODE) +#include <wchar.h> +#endif /* UNICODE || _UNICODE */ +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef ITTAPI_CDECL +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# define ITTAPI_CDECL __cdecl +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# if defined _M_IX86 || defined __i386__ +# define ITTAPI_CDECL __attribute__ ((cdecl)) +# else /* _M_IX86 || __i386__ */ +# define ITTAPI_CDECL /* actual only on x86 platform */ +# endif /* _M_IX86 || __i386__ */ +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* ITTAPI_CDECL */ + +#ifndef STDCALL +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# define STDCALL __stdcall +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# if defined _M_IX86 || defined __i386__ +# define STDCALL __attribute__ ((stdcall)) +# else /* _M_IX86 || __i386__ */ +# define STDCALL /* supported only on x86 platform */ +# endif /* _M_IX86 || __i386__ */ +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* STDCALL */ + +#define ITTAPI ITTAPI_CDECL +#define LIBITTAPI ITTAPI_CDECL + +/* TODO: Temporary for compatibility! */ +#define ITTAPI_CALL ITTAPI_CDECL +#define LIBITTAPI_CALL ITTAPI_CDECL + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +/* use __forceinline (VC++ specific) */ +#define ITT_INLINE __forceinline +#define ITT_INLINE_ATTRIBUTE /* nothing */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/* + * Generally, functions are not inlined unless optimization is specified. + * For functions declared inline, this attribute inlines the function even + * if no optimization level was specified. 
+ */ +#ifdef __STRICT_ANSI__ +#define ITT_INLINE static +#define ITT_INLINE_ATTRIBUTE __attribute__((unused)) +#else /* __STRICT_ANSI__ */ +#define ITT_INLINE static inline +#define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused)) +#endif /* __STRICT_ANSI__ */ +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/** @endcond */ + +#ifdef INTEL_ITTNOTIFY_ENABLE_LEGACY +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# pragma message("WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro") +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# warning "Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro" +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# include "vtune/legacy/ittnotify.h" +#endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */ + +/** @cond exclude_from_documentation */ +/* Helper macro for joining tokens */ +#define ITT_JOIN_AUX(p,n) p##n +#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) + +#ifdef ITT_MAJOR +#undef ITT_MAJOR +#endif +#ifdef ITT_MINOR +#undef ITT_MINOR +#endif +#define ITT_MAJOR 3 +#define ITT_MINOR 0 + +/* Standard versioning of a token with major and minor version numbers */ +#define ITT_VERSIONIZE(x) \ + ITT_JOIN(x, \ + ITT_JOIN(_, \ + ITT_JOIN(ITT_MAJOR, \ + ITT_JOIN(_, ITT_MINOR)))) + +#ifndef INTEL_ITTNOTIFY_PREFIX +# define INTEL_ITTNOTIFY_PREFIX __itt_ +#endif /* INTEL_ITTNOTIFY_PREFIX */ +#ifndef INTEL_ITTNOTIFY_POSTFIX +# define INTEL_ITTNOTIFY_POSTFIX _ptr_ +#endif /* INTEL_ITTNOTIFY_POSTFIX */ + +#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) +#define ITTNOTIFY_NAME(n) ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))) + +#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) +#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) + +#define ITTNOTIFY_VOID_D0(n,d) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_VOID_D1(n,d,x) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_VOID_D2(n,d,x,y) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) +#define ITTNOTIFY_DATA_D0(n,d) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_DATA_D1(n,d,x) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_DATA_D2(n,d,x,y) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) + +#ifdef ITT_STUB +#undef ITT_STUB +#endif +#ifdef ITT_STUBV +#undef ITT_STUBV +#endif +#define ITT_STUBV(api,type,name,args) \ + typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ + extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); +#define ITT_STUB ITT_STUBV +/** @endcond */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** @cond exclude_from_gpa_documentation */ +/** + * @defgroup public Public API + * @{ + * @} + */ + +/** + * @defgroup control Collection Control + * @ingroup public + * General behavior: application continues to run, but no profiling information is being collected + * + * Pausing occurs not only for the current thread but for all process as well as spawned processes + * - Intel(R) Parallel Inspector and Intel(R) Inspector XE: + * - Does not analyze or report errors that involve memory access. + * - Other errors are reported as usual. Pausing data collection in + * Intel(R) Parallel Inspector and Intel(R) Inspector XE + * only pauses tracing and analyzing memory access. + * It does not pause tracing or analyzing threading APIs. + * . + * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE: + * - Does continue to record when new threads are started. + * . + * - Other effects: + * - Possible reduction of runtime overhead. + * . + * @{ + */ +/** @brief Pause collection */ +void ITTAPI __itt_pause(void); +/** @brief Resume collection */ +void ITTAPI __itt_resume(void); +/** @brief Detach collection */ +void ITTAPI __itt_detach(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, pause, (void)) +ITT_STUBV(ITTAPI, void, resume, (void)) +ITT_STUBV(ITTAPI, void, detach, (void)) +#define __itt_pause ITTNOTIFY_VOID(pause) +#define __itt_pause_ptr ITTNOTIFY_NAME(pause) +#define __itt_resume ITTNOTIFY_VOID(resume) +#define __itt_resume_ptr ITTNOTIFY_NAME(resume) +#define __itt_detach ITTNOTIFY_VOID(detach) +#define __itt_detach_ptr ITTNOTIFY_NAME(detach) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_pause() +#define __itt_pause_ptr 0 +#define __itt_resume() +#define __itt_resume_ptr 0 +#define __itt_detach() +#define __itt_detach_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_pause_ptr 0 +#define __itt_resume_ptr 0 +#define __itt_detach_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} control group */ +/** @endcond */ + +/** + * @defgroup threads Threads + * @ingroup public + * Give names to threads + * @{ + */ +/** + * @brief Sets thread name of calling thread + * @param[in] name - name of thread + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_thread_set_nameA(const char *name); +void ITTAPI __itt_thread_set_nameW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_thread_set_name __itt_thread_set_nameW +# define __itt_thread_set_name_ptr __itt_thread_set_nameW_ptr +#else /* UNICODE */ +# define __itt_thread_set_name __itt_thread_set_nameA +# define __itt_thread_set_name_ptr __itt_thread_set_nameA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_thread_set_name(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name)) +ITT_STUBV(ITTAPI, void, 
thread_set_nameW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, thread_set_name, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_thread_set_nameA ITTNOTIFY_VOID(thread_set_nameA) +#define __itt_thread_set_nameA_ptr ITTNOTIFY_NAME(thread_set_nameA) +#define __itt_thread_set_nameW ITTNOTIFY_VOID(thread_set_nameW) +#define __itt_thread_set_nameW_ptr ITTNOTIFY_NAME(thread_set_nameW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_thread_set_name ITTNOTIFY_VOID(thread_set_name) +#define __itt_thread_set_name_ptr ITTNOTIFY_NAME(thread_set_name) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_thread_set_nameA(name) +#define __itt_thread_set_nameA_ptr 0 +#define __itt_thread_set_nameW(name) +#define __itt_thread_set_nameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_thread_set_name(name) +#define __itt_thread_set_name_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_thread_set_nameA_ptr 0 +#define __itt_thread_set_nameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_thread_set_name_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** + * @brief Mark current thread as ignored from this point on, for the duration of its existence. + */ +void ITTAPI __itt_thread_ignore(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, thread_ignore, (void)) +#define __itt_thread_ignore ITTNOTIFY_VOID(thread_ignore) +#define __itt_thread_ignore_ptr ITTNOTIFY_NAME(thread_ignore) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_thread_ignore() +#define __itt_thread_ignore_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_thread_ignore_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} threads group */ + +/** + * @defgroup suppress Error suppression + * @ingroup public + * General behavior: application continues to run, but errors are suppressed + * + * @{ + */ + +/*****************************************************************//** + * @name group of functions used for error suppression in correctness tools + *********************************************************************/ +/** @{ */ +/** + * @hideinitializer + * @brief possible value for suppression mask + */ +#define __itt_suppress_all_errors 0x7fffffff + +/** + * @hideinitializer + * @brief possible value for suppression mask (suppresses errors from threading analysis) + */ +#define __itt_suppress_threading_errors 0x000000ff + +/** + * @hideinitializer + * @brief possible value for suppression mask (suppresses errors from memory analysis) + */ +#define __itt_suppress_memory_errors 0x0000ff00 + +/** + * @brief Start suppressing errors identified in mask on this thread + */ +void ITTAPI __itt_suppress_push(unsigned int mask); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask)) +#define __itt_suppress_push ITTNOTIFY_VOID(suppress_push) +#define __itt_suppress_push_ptr ITTNOTIFY_NAME(suppress_push) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define 
__itt_suppress_push(mask) +#define __itt_suppress_push_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_push_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Undo the effects of the matching call to __itt_suppress_push + */ +void ITTAPI __itt_suppress_pop(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_pop, (void)) +#define __itt_suppress_pop ITTNOTIFY_VOID(suppress_pop) +#define __itt_suppress_pop_ptr ITTNOTIFY_NAME(suppress_pop) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_pop() +#define __itt_suppress_pop_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_pop_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @enum __itt_model_disable + * @brief Enumerator for the disable methods + */ +typedef enum __itt_suppress_mode { + __itt_unsuppress_range, + __itt_suppress_range +} __itt_suppress_mode_t; + +/** + * @brief Mark a range of memory for error suppression or unsuppression for error types included in mask + */ +void ITTAPI __itt_suppress_mark_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_mark_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size)) +#define __itt_suppress_mark_range ITTNOTIFY_VOID(suppress_mark_range) +#define __itt_suppress_mark_range_ptr ITTNOTIFY_NAME(suppress_mark_range) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_mark_range(mask) +#define __itt_suppress_mark_range_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_mark_range_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Undo the effect of a matching call to __itt_suppress_mark_range. If not matching + * call is found, nothing is changed. + */ +void ITTAPI __itt_suppress_clear_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, suppress_clear_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size)) +#define __itt_suppress_clear_range ITTNOTIFY_VOID(suppress_clear_range) +#define __itt_suppress_clear_range_ptr ITTNOTIFY_NAME(suppress_clear_range) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_suppress_clear_range(mask) +#define __itt_suppress_clear_range_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_suppress_clear_range_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} */ +/** @} suppress group */ + +/** + * @defgroup sync Synchronization + * @ingroup public + * Indicate user-written synchronization code + * @{ + */ +/** + * @hideinitializer + * @brief possible value of attribute argument for sync object type + */ +#define __itt_attr_barrier 1 + +/** + * @hideinitializer + * @brief possible value of attribute argument for sync object type + */ +#define __itt_attr_mutex 2 + +/** +@brief Name a synchronization object +@param[in] addr Handle for the synchronization object. You should +use a real address to uniquely identify the synchronization object. +@param[in] objtype null-terminated object type string. 
If NULL is +passed, the name will be "User Synchronization". +@param[in] objname null-terminated object name string. If NULL, +no name will be assigned to the object. +@param[in] attribute one of [#__itt_attr_barrier, #__itt_attr_mutex] + */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_sync_createA(void *addr, const char *objtype, const char *objname, int attribute); +void ITTAPI __itt_sync_createW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_sync_create __itt_sync_createW +# define __itt_sync_create_ptr __itt_sync_createW_ptr +#else /* UNICODE */ +# define __itt_sync_create __itt_sync_createA +# define __itt_sync_create_ptr __itt_sync_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_sync_create (void *addr, const char *objtype, const char *objname, int attribute); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute)) +ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, sync_create, (void *addr, const char* objtype, const char* objname, int attribute)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_createA ITTNOTIFY_VOID(sync_createA) +#define __itt_sync_createA_ptr ITTNOTIFY_NAME(sync_createA) +#define __itt_sync_createW ITTNOTIFY_VOID(sync_createW) +#define __itt_sync_createW_ptr ITTNOTIFY_NAME(sync_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_create ITTNOTIFY_VOID(sync_create) +#define __itt_sync_create_ptr ITTNOTIFY_NAME(sync_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_createA(addr, objtype, objname, attribute) +#define __itt_sync_createA_ptr 0 +#define __itt_sync_createW(addr, objtype, objname, attribute) +#define __itt_sync_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_create(addr, objtype, objname, attribute) +#define __itt_sync_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_createA_ptr 0 +#define __itt_sync_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** +@brief Rename a synchronization object + +You can use the rename call to assign or reassign a name to a given +synchronization object. +@param[in] addr handle for the synchronization object. +@param[in] name null-terminated object name string. 
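+
+Illustrative only (not part of the original header): the same address is
+used for both calls, so an object created earlier can be renamed later,
+e.g. (MyLock and gQueueLock are hypothetical user code):
+@code
+    static MyLock gQueueLock;
+    __itt_sync_create(&gQueueLock, "mutex", "queue lock", __itt_attr_mutex);
+    // ... later, give it a more specific name ...
+    __itt_sync_rename(&gQueueLock, "render queue lock");
+@endcode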
+*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_sync_renameA(void *addr, const char *name); +void ITTAPI __itt_sync_renameW(void *addr, const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_sync_rename __itt_sync_renameW +# define __itt_sync_rename_ptr __itt_sync_renameW_ptr +#else /* UNICODE */ +# define __itt_sync_rename __itt_sync_renameA +# define __itt_sync_rename_ptr __itt_sync_renameA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_sync_rename(void *addr, const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char *name)) +ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, sync_rename, (void *addr, const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_renameA ITTNOTIFY_VOID(sync_renameA) +#define __itt_sync_renameA_ptr ITTNOTIFY_NAME(sync_renameA) +#define __itt_sync_renameW ITTNOTIFY_VOID(sync_renameW) +#define __itt_sync_renameW_ptr ITTNOTIFY_NAME(sync_renameW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_rename ITTNOTIFY_VOID(sync_rename) +#define __itt_sync_rename_ptr ITTNOTIFY_NAME(sync_rename) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_renameA(addr, name) +#define __itt_sync_renameA_ptr 0 +#define __itt_sync_renameW(addr, name) +#define __itt_sync_renameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_rename(addr, name) +#define __itt_sync_rename_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_sync_renameA_ptr 0 +#define __itt_sync_renameW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_sync_rename_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + @brief Destroy a synchronization object. + @param addr Handle for the synchronization object. 
+ */ +void ITTAPI __itt_sync_destroy(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr)) +#define __itt_sync_destroy ITTNOTIFY_VOID(sync_destroy) +#define __itt_sync_destroy_ptr ITTNOTIFY_NAME(sync_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_destroy(addr) +#define __itt_sync_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/*****************************************************************//** + * @name group of functions is used for performance measurement tools + *********************************************************************/ +/** @{ */ +/** + * @brief Enter spin loop on user-defined sync object + */ +void ITTAPI __itt_sync_prepare(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_prepare, (void *addr)) +#define __itt_sync_prepare ITTNOTIFY_VOID(sync_prepare) +#define __itt_sync_prepare_ptr ITTNOTIFY_NAME(sync_prepare) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_prepare(addr) +#define __itt_sync_prepare_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_prepare_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Quit spin loop without acquiring spin object + */ +void ITTAPI __itt_sync_cancel(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr)) +#define __itt_sync_cancel ITTNOTIFY_VOID(sync_cancel) +#define __itt_sync_cancel_ptr ITTNOTIFY_NAME(sync_cancel) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_cancel(addr) +#define __itt_sync_cancel_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_cancel_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Successful spin loop completion (sync object acquired) + */ +void ITTAPI __itt_sync_acquired(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr)) +#define __itt_sync_acquired ITTNOTIFY_VOID(sync_acquired) +#define __itt_sync_acquired_ptr ITTNOTIFY_NAME(sync_acquired) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_acquired(addr) +#define __itt_sync_acquired_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_acquired_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Start sync object releasing code. Is called before the lock release call. 
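+ *
+ * Illustrative only (not part of the original header): for a user-written
+ * spin lock, the prepare/acquired/releasing calls typically bracket the
+ * wait and the release as follows (try_lock/unlock are hypothetical user
+ * functions; __itt_sync_cancel would be reported instead of
+ * __itt_sync_acquired if the wait were abandoned):
+ * @code
+ *     __itt_sync_prepare(&lock);     // about to start spinning on &lock
+ *     while (!try_lock(&lock)) {
+ *     }
+ *     __itt_sync_acquired(&lock);    // spin finished, object acquired
+ *     // ... critical section ...
+ *     __itt_sync_releasing(&lock);   // immediately before the release call
+ *     unlock(&lock);
+ * @endcode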
+ */ +void ITTAPI __itt_sync_releasing(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, sync_releasing, (void *addr)) +#define __itt_sync_releasing ITTNOTIFY_VOID(sync_releasing) +#define __itt_sync_releasing_ptr ITTNOTIFY_NAME(sync_releasing) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_sync_releasing(addr) +#define __itt_sync_releasing_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_sync_releasing_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} */ + +/** @} sync group */ + +/**************************************************************//** + * @name group of functions is used for correctness checking tools + ******************************************************************/ +/** @{ */ +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. + * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. + * @see void __itt_sync_prepare(void* addr); + */ +void ITTAPI __itt_fsync_prepare(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_prepare, (void *addr)) +#define __itt_fsync_prepare ITTNOTIFY_VOID(fsync_prepare) +#define __itt_fsync_prepare_ptr ITTNOTIFY_NAME(fsync_prepare) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_prepare(addr) +#define __itt_fsync_prepare_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_prepare_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. + * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. + * @see void __itt_sync_cancel(void *addr); + */ +void ITTAPI __itt_fsync_cancel(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr)) +#define __itt_fsync_cancel ITTNOTIFY_VOID(fsync_cancel) +#define __itt_fsync_cancel_ptr ITTNOTIFY_NAME(fsync_cancel) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_cancel(addr) +#define __itt_fsync_cancel_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_cancel_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. + * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. 
+ * @see void __itt_sync_acquired(void *addr); + */ +void ITTAPI __itt_fsync_acquired(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr)) +#define __itt_fsync_acquired ITTNOTIFY_VOID(fsync_acquired) +#define __itt_fsync_acquired_ptr ITTNOTIFY_NAME(fsync_acquired) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_acquired(addr) +#define __itt_fsync_acquired_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_acquired_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup legacy + * @deprecated Legacy API + * @brief Fast synchronization which does no require spinning. + * - This special function is to be used by TBB and OpenMP libraries only when they know + * there is no spin but they need to suppress TC warnings about shared variable modifications. + * - It only has corresponding pointers in static library and does not have corresponding function + * in dynamic library. + * @see void __itt_sync_releasing(void* addr); + */ +void ITTAPI __itt_fsync_releasing(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, fsync_releasing, (void *addr)) +#define __itt_fsync_releasing ITTNOTIFY_VOID(fsync_releasing) +#define __itt_fsync_releasing_ptr ITTNOTIFY_NAME(fsync_releasing) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_fsync_releasing(addr) +#define __itt_fsync_releasing_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_fsync_releasing_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} */ + +/** + * @defgroup model Modeling by Intel(R) Parallel Advisor + * @ingroup public + * This is the subset of itt used for modeling by Intel(R) Parallel Advisor. + * This API is called ONLY using annotate.h, by "Annotation" macros + * the user places in their sources during the parallelism modeling steps. + * + * site_begin/end and task_begin/end take the address of handle variables, + * which are writeable by the API. Handles must be 0 initialized prior + * to the first call to begin, or may cause a run-time failure. + * The handles are initialized in a multi-thread safe way by the API if + * the handle is 0. The commonly expected idiom is one static handle to + * identify a site or task. If a site or task of the same name has already + * been started during this collection, the same handle MAY be returned, + * but is not required to be - it is unspecified if data merging is done + * based on name. These routines also take an instance variable. Like + * the lexical instance, these must be 0 initialized. Unlike the lexical + * instance, this is used to track a single dynamic instance. + * + * API used by the Intel(R) Parallel Advisor to describe potential concurrency + * and related activities. User-added source annotations expand to calls + * to these procedures to enable modeling of a hypothetical concurrent + * execution serially. 
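+ *
+ * Illustrative only (not part of the original header): using the
+ * zero-initialized static handle idiom described above, a modeled site
+ * containing a single task looks roughly like:
+ * @code
+ *     static __itt_model_site s;
+ *     static __itt_model_site_instance si;
+ *     static __itt_model_task t;
+ *     static __itt_model_task_instance ti;
+ *     __itt_model_site_begin(&s, &si, "my_site");
+ *     __itt_model_task_begin(&t, &ti, "my_task");
+ *     // ... work that might become a parallel task ...
+ *     __itt_model_task_end(&t, &ti);
+ *     __itt_model_site_end(&s, &si);
+ * @endcode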
+ * @{ + */ +#if !defined(_ADVISOR_ANNOTATE_H_) || defined(ANNOTATE_EXPAND_NULL) + +typedef void* __itt_model_site; /*!< @brief handle for lexical site */ +typedef void* __itt_model_site_instance; /*!< @brief handle for dynamic instance */ +typedef void* __itt_model_task; /*!< @brief handle for lexical site */ +typedef void* __itt_model_task_instance; /*!< @brief handle for dynamic instance */ + +/** + * @enum __itt_model_disable + * @brief Enumerator for the disable methods + */ +typedef enum { + __itt_model_disable_observation, + __itt_model_disable_collection +} __itt_model_disable; + +#endif /* !_ADVISOR_ANNOTATE_H_ || ANNOTATE_EXPAND_NULL */ + +/** + * @brief ANNOTATE_SITE_BEGIN/ANNOTATE_SITE_END support. + * + * site_begin/end model a potential concurrency site. + * site instances may be recursively nested with themselves. + * site_end exits the most recently started but unended site for the current + * thread. The handle passed to end may be used to validate structure. + * Instances of a site encountered on different threads concurrently + * are considered completely distinct. If the site name for two different + * lexical sites match, it is unspecified whether they are treated as the + * same or different for data presentation. + */ +void ITTAPI __itt_model_site_begin(__itt_model_site *site, __itt_model_site_instance *instance, const char *name); +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_model_site_beginW(const wchar_t *name); +#endif +void ITTAPI __itt_model_site_beginA(const char *name); +void ITTAPI __itt_model_site_beginAL(const char *name, size_t siteNameLen); +void ITTAPI __itt_model_site_end (__itt_model_site *site, __itt_model_site_instance *instance); +void ITTAPI __itt_model_site_end_2(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_site_begin, (__itt_model_site *site, __itt_model_site_instance *instance, const char *name)) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, model_site_beginW, (const wchar_t *name)) +#endif +ITT_STUBV(ITTAPI, void, model_site_beginA, (const char *name)) +ITT_STUBV(ITTAPI, void, model_site_beginAL, (const char *name, size_t siteNameLen)) +ITT_STUBV(ITTAPI, void, model_site_end, (__itt_model_site *site, __itt_model_site_instance *instance)) +ITT_STUBV(ITTAPI, void, model_site_end_2, (void)) +#define __itt_model_site_begin ITTNOTIFY_VOID(model_site_begin) +#define __itt_model_site_begin_ptr ITTNOTIFY_NAME(model_site_begin) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_site_beginW ITTNOTIFY_VOID(model_site_beginW) +#define __itt_model_site_beginW_ptr ITTNOTIFY_NAME(model_site_beginW) +#endif +#define __itt_model_site_beginA ITTNOTIFY_VOID(model_site_beginA) +#define __itt_model_site_beginA_ptr ITTNOTIFY_NAME(model_site_beginA) +#define __itt_model_site_beginAL ITTNOTIFY_VOID(model_site_beginAL) +#define __itt_model_site_beginAL_ptr ITTNOTIFY_NAME(model_site_beginAL) +#define __itt_model_site_end ITTNOTIFY_VOID(model_site_end) +#define __itt_model_site_end_ptr ITTNOTIFY_NAME(model_site_end) +#define __itt_model_site_end_2 ITTNOTIFY_VOID(model_site_end_2) +#define __itt_model_site_end_2_ptr ITTNOTIFY_NAME(model_site_end_2) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_site_begin(site, instance, name) +#define __itt_model_site_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_site_beginW(name) +#define __itt_model_site_beginW_ptr 0 +#endif +#define __itt_model_site_beginA(name) +#define 
__itt_model_site_beginA_ptr 0 +#define __itt_model_site_beginAL(name, siteNameLen) +#define __itt_model_site_beginAL_ptr 0 +#define __itt_model_site_end(site, instance) +#define __itt_model_site_end_ptr 0 +#define __itt_model_site_end_2() +#define __itt_model_site_end_2_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_site_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_site_beginW_ptr 0 +#endif +#define __itt_model_site_beginA_ptr 0 +#define __itt_model_site_beginAL_ptr 0 +#define __itt_model_site_end_ptr 0 +#define __itt_model_site_end_2_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_TASK_BEGIN/ANNOTATE_TASK_END support + * + * task_begin/end model a potential task, which is contained within the most + * closely enclosing dynamic site. task_end exits the most recently started + * but unended task. The handle passed to end may be used to validate + * structure. It is unspecified if bad dynamic nesting is detected. If it + * is, it should be encoded in the resulting data collection. The collector + * should not fail due to construct nesting issues, nor attempt to directly + * indicate the problem. + */ +void ITTAPI __itt_model_task_begin(__itt_model_task *task, __itt_model_task_instance *instance, const char *name); +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_model_task_beginW(const wchar_t *name); +void ITTAPI __itt_model_iteration_taskW(const wchar_t *name); +#endif +void ITTAPI __itt_model_task_beginA(const char *name); +void ITTAPI __itt_model_task_beginAL(const char *name, size_t taskNameLen); +void ITTAPI __itt_model_iteration_taskA(const char *name); +void ITTAPI __itt_model_iteration_taskAL(const char *name, size_t taskNameLen); +void ITTAPI __itt_model_task_end (__itt_model_task *task, __itt_model_task_instance *instance); +void ITTAPI __itt_model_task_end_2(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_task_begin, (__itt_model_task *task, __itt_model_task_instance *instance, const char *name)) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, model_task_beginW, (const wchar_t *name)) +ITT_STUBV(ITTAPI, void, model_iteration_taskW, (const wchar_t *name)) +#endif +ITT_STUBV(ITTAPI, void, model_task_beginA, (const char *name)) +ITT_STUBV(ITTAPI, void, model_task_beginAL, (const char *name, size_t taskNameLen)) +ITT_STUBV(ITTAPI, void, model_iteration_taskA, (const char *name)) +ITT_STUBV(ITTAPI, void, model_iteration_taskAL, (const char *name, size_t taskNameLen)) +ITT_STUBV(ITTAPI, void, model_task_end, (__itt_model_task *task, __itt_model_task_instance *instance)) +ITT_STUBV(ITTAPI, void, model_task_end_2, (void)) +#define __itt_model_task_begin ITTNOTIFY_VOID(model_task_begin) +#define __itt_model_task_begin_ptr ITTNOTIFY_NAME(model_task_begin) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_task_beginW ITTNOTIFY_VOID(model_task_beginW) +#define __itt_model_task_beginW_ptr ITTNOTIFY_NAME(model_task_beginW) +#define __itt_model_iteration_taskW ITTNOTIFY_VOID(model_iteration_taskW) +#define __itt_model_iteration_taskW_ptr ITTNOTIFY_NAME(model_iteration_taskW) +#endif +#define __itt_model_task_beginA ITTNOTIFY_VOID(model_task_beginA) +#define __itt_model_task_beginA_ptr ITTNOTIFY_NAME(model_task_beginA) +#define __itt_model_task_beginAL ITTNOTIFY_VOID(model_task_beginAL) +#define __itt_model_task_beginAL_ptr ITTNOTIFY_NAME(model_task_beginAL) +#define 
__itt_model_iteration_taskA ITTNOTIFY_VOID(model_iteration_taskA) +#define __itt_model_iteration_taskA_ptr ITTNOTIFY_NAME(model_iteration_taskA) +#define __itt_model_iteration_taskAL ITTNOTIFY_VOID(model_iteration_taskAL) +#define __itt_model_iteration_taskAL_ptr ITTNOTIFY_NAME(model_iteration_taskAL) +#define __itt_model_task_end ITTNOTIFY_VOID(model_task_end) +#define __itt_model_task_end_ptr ITTNOTIFY_NAME(model_task_end) +#define __itt_model_task_end_2 ITTNOTIFY_VOID(model_task_end_2) +#define __itt_model_task_end_2_ptr ITTNOTIFY_NAME(model_task_end_2) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_task_begin(task, instance, name) +#define __itt_model_task_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_task_beginW(name) +#define __itt_model_task_beginW_ptr 0 +#endif +#define __itt_model_task_beginA(name) +#define __itt_model_task_beginA_ptr 0 +#define __itt_model_task_beginAL(name, siteNameLen) +#define __itt_model_task_beginAL_ptr 0 +#define __itt_model_iteration_taskA(name) +#define __itt_model_iteration_taskA_ptr 0 +#define __itt_model_iteration_taskAL(name, siteNameLen) +#define __itt_model_iteration_taskAL_ptr 0 +#define __itt_model_task_end(task, instance) +#define __itt_model_task_end_ptr 0 +#define __itt_model_task_end_2() +#define __itt_model_task_end_2_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_task_begin_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_model_task_beginW_ptr 0 +#endif +#define __itt_model_task_beginA_ptr 0 +#define __itt_model_task_beginAL_ptr 0 +#define __itt_model_iteration_taskA_ptr 0 +#define __itt_model_iteration_taskAL_ptr 0 +#define __itt_model_task_end_ptr 0 +#define __itt_model_task_end_2_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_LOCK_ACQUIRE/ANNOTATE_LOCK_RELEASE support + * + * lock_acquire/release model a potential lock for both lockset and + * performance modeling. Each unique address is modeled as a separate + * lock, with invalid addresses being valid lock IDs. Specifically: + * no storage is accessed by the API at the specified address - it is only + * used for lock identification. Lock acquires may be self-nested and are + * unlocked by a corresponding number of releases. + * (These closely correspond to __itt_sync_acquired/__itt_sync_releasing, + * but may not have identical semantics.) 
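+ *
+ * Illustrative only (not part of the original header): any unique address
+ * serves as the lock identifier, typically the address of the user's own
+ * lock variable (my_lock is hypothetical):
+ * @code
+ *     __itt_model_lock_acquire(&my_lock);
+ *     // ... modeled critical section ...
+ *     __itt_model_lock_release(&my_lock);
+ * @endcode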
+ */ +void ITTAPI __itt_model_lock_acquire(void *lock); +void ITTAPI __itt_model_lock_acquire_2(void *lock); +void ITTAPI __itt_model_lock_release(void *lock); +void ITTAPI __itt_model_lock_release_2(void *lock); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock)) +ITT_STUBV(ITTAPI, void, model_lock_acquire_2, (void *lock)) +ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock)) +ITT_STUBV(ITTAPI, void, model_lock_release_2, (void *lock)) +#define __itt_model_lock_acquire ITTNOTIFY_VOID(model_lock_acquire) +#define __itt_model_lock_acquire_ptr ITTNOTIFY_NAME(model_lock_acquire) +#define __itt_model_lock_acquire_2 ITTNOTIFY_VOID(model_lock_acquire_2) +#define __itt_model_lock_acquire_2_ptr ITTNOTIFY_NAME(model_lock_acquire_2) +#define __itt_model_lock_release ITTNOTIFY_VOID(model_lock_release) +#define __itt_model_lock_release_ptr ITTNOTIFY_NAME(model_lock_release) +#define __itt_model_lock_release_2 ITTNOTIFY_VOID(model_lock_release_2) +#define __itt_model_lock_release_2_ptr ITTNOTIFY_NAME(model_lock_release_2) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_lock_acquire(lock) +#define __itt_model_lock_acquire_ptr 0 +#define __itt_model_lock_acquire_2(lock) +#define __itt_model_lock_acquire_2_ptr 0 +#define __itt_model_lock_release(lock) +#define __itt_model_lock_release_ptr 0 +#define __itt_model_lock_release_2(lock) +#define __itt_model_lock_release_2_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_lock_acquire_ptr 0 +#define __itt_model_lock_acquire_2_ptr 0 +#define __itt_model_lock_release_ptr 0 +#define __itt_model_lock_release_2_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_RECORD_ALLOCATION/ANNOTATE_RECORD_DEALLOCATION support + * + * record_allocation/deallocation describe user-defined memory allocator + * behavior, which may be required for correctness modeling to understand + * when storage is not expected to be actually reused across threads. 
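+ *
+ * Illustrative only (not part of the original header): a user pool
+ * allocator would typically report its blocks like this (pool_alloc and
+ * pool_free are hypothetical):
+ * @code
+ *     void* p = pool_alloc(n);
+ *     __itt_model_record_allocation(p, n);
+ *     // ... use, then return the block to the pool ...
+ *     __itt_model_record_deallocation(p);
+ *     pool_free(p);
+ * @endcode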
+ */ +void ITTAPI __itt_model_record_allocation (void *addr, size_t size); +void ITTAPI __itt_model_record_deallocation(void *addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_record_allocation, (void *addr, size_t size)) +ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr)) +#define __itt_model_record_allocation ITTNOTIFY_VOID(model_record_allocation) +#define __itt_model_record_allocation_ptr ITTNOTIFY_NAME(model_record_allocation) +#define __itt_model_record_deallocation ITTNOTIFY_VOID(model_record_deallocation) +#define __itt_model_record_deallocation_ptr ITTNOTIFY_NAME(model_record_deallocation) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_record_allocation(addr, size) +#define __itt_model_record_allocation_ptr 0 +#define __itt_model_record_deallocation(addr) +#define __itt_model_record_deallocation_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_record_allocation_ptr 0 +#define __itt_model_record_deallocation_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_INDUCTION_USES support + * + * Note particular storage is inductive through the end of the current site + */ +void ITTAPI __itt_model_induction_uses(void* addr, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_induction_uses, (void *addr, size_t size)) +#define __itt_model_induction_uses ITTNOTIFY_VOID(model_induction_uses) +#define __itt_model_induction_uses_ptr ITTNOTIFY_NAME(model_induction_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_induction_uses(addr, size) +#define __itt_model_induction_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_induction_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_REDUCTION_USES support + * + * Note particular storage is used for reduction through the end + * of the current site + */ +void ITTAPI __itt_model_reduction_uses(void* addr, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_reduction_uses, (void *addr, size_t size)) +#define __itt_model_reduction_uses ITTNOTIFY_VOID(model_reduction_uses) +#define __itt_model_reduction_uses_ptr ITTNOTIFY_NAME(model_reduction_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_reduction_uses(addr, size) +#define __itt_model_reduction_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_reduction_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_OBSERVE_USES support + * + * Have correctness modeling record observations about uses of storage + * through the end of the current site + */ +void ITTAPI __itt_model_observe_uses(void* addr, size_t size); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_observe_uses, (void *addr, size_t size)) +#define __itt_model_observe_uses ITTNOTIFY_VOID(model_observe_uses) +#define __itt_model_observe_uses_ptr ITTNOTIFY_NAME(model_observe_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_observe_uses(addr, size) +#define __itt_model_observe_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define 
__itt_model_observe_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_CLEAR_USES support + * + * Clear the special handling of a piece of storage related to induction, + * reduction or observe_uses + */ +void ITTAPI __itt_model_clear_uses(void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_clear_uses, (void *addr)) +#define __itt_model_clear_uses ITTNOTIFY_VOID(model_clear_uses) +#define __itt_model_clear_uses_ptr ITTNOTIFY_NAME(model_clear_uses) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_clear_uses(addr) +#define __itt_model_clear_uses_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_clear_uses_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief ANNOTATE_DISABLE_*_PUSH/ANNOTATE_DISABLE_*_POP support + * + * disable_push/disable_pop push and pop disabling based on a parameter. + * Disabling observations stops processing of memory references during + * correctness modeling, and all annotations that occur in the disabled + * region. This allows description of code that is expected to be handled + * specially during conversion to parallelism or that is not recognized + * by tools (e.g. some kinds of synchronization operations.) + * This mechanism causes all annotations in the disabled region, other + * than disable_push and disable_pop, to be ignored. (For example, this + * might validly be used to disable an entire parallel site and the contained + * tasks and locking in it for data collection purposes.) + * The disable for collection is a more expensive operation, but reduces + * collector overhead significantly. This applies to BOTH correctness data + * collection and performance data collection. For example, a site + * containing a task might only enable data collection for the first 10 + * iterations. Both performance and correctness data should reflect this, + * and the program should run as close to full speed as possible when + * collection is disabled. 
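+ *
+ * Illustrative only (not part of the original header): a region can be
+ * excluded from collection with a matching push/pop pair:
+ * @code
+ *     __itt_model_disable_push(__itt_model_disable_collection);
+ *     // ... annotations and memory references here are not collected ...
+ *     __itt_model_disable_pop();
+ * @endcode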
+ */ +void ITTAPI __itt_model_disable_push(__itt_model_disable x); +void ITTAPI __itt_model_disable_pop(void); +void ITTAPI __itt_model_aggregate_task(size_t x); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x)) +ITT_STUBV(ITTAPI, void, model_disable_pop, (void)) +ITT_STUBV(ITTAPI, void, model_aggregate_task, (size_t x)) +#define __itt_model_disable_push ITTNOTIFY_VOID(model_disable_push) +#define __itt_model_disable_push_ptr ITTNOTIFY_NAME(model_disable_push) +#define __itt_model_disable_pop ITTNOTIFY_VOID(model_disable_pop) +#define __itt_model_disable_pop_ptr ITTNOTIFY_NAME(model_disable_pop) +#define __itt_model_aggregate_task ITTNOTIFY_VOID(model_aggregate_task) +#define __itt_model_aggregate_task_ptr ITTNOTIFY_NAME(model_aggregate_task) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_model_disable_push(x) +#define __itt_model_disable_push_ptr 0 +#define __itt_model_disable_pop() +#define __itt_model_disable_pop_ptr 0 +#define __itt_model_aggregate_task(x) +#define __itt_model_aggregate_task_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_model_disable_push_ptr 0 +#define __itt_model_disable_pop_ptr 0 +#define __itt_model_aggregate_task_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} model group */ + +/** + * @defgroup heap Heap + * @ingroup public + * Heap group + * @{ + */ + +typedef void* __itt_heap_function; + +/** + * @brief Create an identification for heap function + * @return non-zero identifier or NULL + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_heap_function ITTAPI __itt_heap_function_createA(const char* name, const char* domain); +__itt_heap_function ITTAPI __itt_heap_function_createW(const wchar_t* name, const wchar_t* domain); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_heap_function_create __itt_heap_function_createW +# define __itt_heap_function_create_ptr __itt_heap_function_createW_ptr +#else +# define __itt_heap_function_create __itt_heap_function_createA +# define __itt_heap_function_create_ptr __itt_heap_function_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_heap_function ITTAPI __itt_heap_function_create(const char* name, const char* domain); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char* name, const char* domain)) +ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t* name, const wchar_t* domain)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create, (const char* name, const char* domain)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_heap_function_createA ITTNOTIFY_DATA(heap_function_createA) +#define __itt_heap_function_createA_ptr ITTNOTIFY_NAME(heap_function_createA) +#define __itt_heap_function_createW ITTNOTIFY_DATA(heap_function_createW) +#define __itt_heap_function_createW_ptr ITTNOTIFY_NAME(heap_function_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_heap_function_create ITTNOTIFY_DATA(heap_function_create) +#define __itt_heap_function_create_ptr ITTNOTIFY_NAME(heap_function_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ 
+#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_heap_function_createA(name, domain) (__itt_heap_function)0 +#define __itt_heap_function_createA_ptr 0 +#define __itt_heap_function_createW(name, domain) (__itt_heap_function)0 +#define __itt_heap_function_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_heap_function_create(name, domain) (__itt_heap_function)0 +#define __itt_heap_function_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_heap_function_createA_ptr 0 +#define __itt_heap_function_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_heap_function_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an allocation begin occurrence. + */ +void ITTAPI __itt_heap_allocate_begin(__itt_heap_function h, size_t size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized)) +#define __itt_heap_allocate_begin ITTNOTIFY_VOID(heap_allocate_begin) +#define __itt_heap_allocate_begin_ptr ITTNOTIFY_NAME(heap_allocate_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_allocate_begin(h, size, initialized) +#define __itt_heap_allocate_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_allocate_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an allocation end occurrence. + */ +void ITTAPI __itt_heap_allocate_end(__itt_heap_function h, void** addr, size_t size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void** addr, size_t size, int initialized)) +#define __itt_heap_allocate_end ITTNOTIFY_VOID(heap_allocate_end) +#define __itt_heap_allocate_end_ptr ITTNOTIFY_NAME(heap_allocate_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_allocate_end(h, addr, size, initialized) +#define __itt_heap_allocate_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_allocate_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an free begin occurrence. + */ +void ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr)) +#define __itt_heap_free_begin ITTNOTIFY_VOID(heap_free_begin) +#define __itt_heap_free_begin_ptr ITTNOTIFY_NAME(heap_free_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_free_begin(h, addr) +#define __itt_heap_free_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_free_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an free end occurrence. 
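+ *
+ * Illustrative usage sketch (editorial addition, not part of the upstream API
+ * description): a wrapper that brackets free() with the begin/end hooks. The
+ * wrapper and the handle name "free"/"mylib" are hypothetical.
+ * @code
+ *     static __itt_heap_function g_free_fn;
+ *
+ *     void traced_free(void* p)
+ *     {
+ *         if (!g_free_fn)
+ *             g_free_fn = __itt_heap_function_create("free", "mylib");
+ *         __itt_heap_free_begin(g_free_fn, p);
+ *         free(p);
+ *         __itt_heap_free_end(g_free_fn, p);
+ *     }
+ * @endcode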
+ */ +void ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr)) +#define __itt_heap_free_end ITTNOTIFY_VOID(heap_free_end) +#define __itt_heap_free_end_ptr ITTNOTIFY_NAME(heap_free_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_free_end(h, addr) +#define __itt_heap_free_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_free_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an reallocation begin occurrence. + */ +void ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized)) +#define __itt_heap_reallocate_begin ITTNOTIFY_VOID(heap_reallocate_begin) +#define __itt_heap_reallocate_begin_ptr ITTNOTIFY_NAME(heap_reallocate_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_reallocate_begin(h, addr, new_size, initialized) +#define __itt_heap_reallocate_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_reallocate_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an reallocation end occurrence. + */ +void ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized)) +#define __itt_heap_reallocate_end ITTNOTIFY_VOID(heap_reallocate_end) +#define __itt_heap_reallocate_end_ptr ITTNOTIFY_NAME(heap_reallocate_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_reallocate_end(h, addr, new_addr, new_size, initialized) +#define __itt_heap_reallocate_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_reallocate_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief internal access begin */ +void ITTAPI __itt_heap_internal_access_begin(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void)) +#define __itt_heap_internal_access_begin ITTNOTIFY_VOID(heap_internal_access_begin) +#define __itt_heap_internal_access_begin_ptr ITTNOTIFY_NAME(heap_internal_access_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_internal_access_begin() +#define __itt_heap_internal_access_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_internal_access_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief internal access end */ +void ITTAPI __itt_heap_internal_access_end(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void)) +#define __itt_heap_internal_access_end ITTNOTIFY_VOID(heap_internal_access_end) +#define __itt_heap_internal_access_end_ptr ITTNOTIFY_NAME(heap_internal_access_end) +#else /* 
INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_internal_access_end() +#define __itt_heap_internal_access_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_internal_access_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief record memory growth begin */ +void ITTAPI __itt_heap_record_memory_growth_begin(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_record_memory_growth_begin, (void)) +#define __itt_heap_record_memory_growth_begin ITTNOTIFY_VOID(heap_record_memory_growth_begin) +#define __itt_heap_record_memory_growth_begin_ptr ITTNOTIFY_NAME(heap_record_memory_growth_begin) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_record_memory_growth_begin() +#define __itt_heap_record_memory_growth_begin_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_record_memory_growth_begin_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief record memory growth end */ +void ITTAPI __itt_heap_record_memory_growth_end(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void)) +#define __itt_heap_record_memory_growth_end ITTNOTIFY_VOID(heap_record_memory_growth_end) +#define __itt_heap_record_memory_growth_end_ptr ITTNOTIFY_NAME(heap_record_memory_growth_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_record_memory_growth_end() +#define __itt_heap_record_memory_growth_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_record_memory_growth_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Specify the type of heap detection/reporting to modify. + */ +/** + * @hideinitializer + * @brief Report on memory leaks. + */ +#define __itt_heap_leaks 0x00000001 + +/** + * @hideinitializer + * @brief Report on memory growth. 
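+ *
+ * Illustrative sketch (editorial addition): one possible way to reset the
+ * detection state before a workload and request a report afterwards. The
+ * workload function is hypothetical; exact reporting behaviour is tool-defined.
+ * @code
+ *     __itt_heap_reset_detection(__itt_heap_leaks | __itt_heap_growth);
+ *     run_workload();
+ *     __itt_heap_record(__itt_heap_leaks | __itt_heap_growth);
+ * @endcode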
+ */ +#define __itt_heap_growth 0x00000002 + + +/** @brief heap reset detection */ +void ITTAPI __itt_heap_reset_detection(unsigned int reset_mask); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_reset_detection, (unsigned int reset_mask)) +#define __itt_heap_reset_detection ITTNOTIFY_VOID(heap_reset_detection) +#define __itt_heap_reset_detection_ptr ITTNOTIFY_NAME(heap_reset_detection) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_reset_detection() +#define __itt_heap_reset_detection_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_reset_detection_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @brief report */ +void ITTAPI __itt_heap_record(unsigned int record_mask); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, heap_record, (unsigned int record_mask)) +#define __itt_heap_record ITTNOTIFY_VOID(heap_record) +#define __itt_heap_record_ptr ITTNOTIFY_NAME(heap_record) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_heap_record() +#define __itt_heap_record_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_heap_record_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} heap group */ +/** @endcond */ +/* ========================================================================== */ + +/** + * @defgroup domains Domains + * @ingroup public + * Domains group + * @{ + */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_domain +{ + volatile int flags; /*!< Zero if disabled, non-zero if enabled. The meaning of different non-zero values is reserved to the runtime */ + const char* nameA; /*!< Copy of original name in ASCII. */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* nameW; /*!< Copy of original name in UNICODE. */ +#else /* UNICODE || _UNICODE */ + void* nameW; +#endif /* UNICODE || _UNICODE */ + int extra1; /*!< Reserved to the runtime */ + void* extra2; /*!< Reserved to the runtime */ + struct ___itt_domain* next; +} __itt_domain; + +#pragma pack(pop) +/** @endcond */ + +/** + * @ingroup domains + * @brief Create a domain. + * Create domain using some domain name: the URI naming style is recommended. + * Because the set of domains is expected to be static over the application's + * execution time, there is no mechanism to destroy a domain. + * Any domain can be accessed by any thread in the process, regardless of + * which thread created the domain. This call is thread-safe. 
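+ *
+ * Illustrative sketch (editorial addition); the domain name shown is
+ * hypothetical:
+ * @code
+ *     __itt_domain* domain = __itt_domain_create("com.example.profiler");
+ * @endcode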
+ * @param[in] name name of domain + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_domain* ITTAPI __itt_domain_createA(const char *name); +__itt_domain* ITTAPI __itt_domain_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_domain_create __itt_domain_createW +# define __itt_domain_create_ptr __itt_domain_createW_ptr +#else /* UNICODE */ +# define __itt_domain_create __itt_domain_createA +# define __itt_domain_create_ptr __itt_domain_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_domain* ITTAPI __itt_domain_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_domain*, domain_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_domain*, domain_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_domain_createA ITTNOTIFY_DATA(domain_createA) +#define __itt_domain_createA_ptr ITTNOTIFY_NAME(domain_createA) +#define __itt_domain_createW ITTNOTIFY_DATA(domain_createW) +#define __itt_domain_createW_ptr ITTNOTIFY_NAME(domain_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_domain_create ITTNOTIFY_DATA(domain_create) +#define __itt_domain_create_ptr ITTNOTIFY_NAME(domain_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_domain_createA(name) (__itt_domain*)0 +#define __itt_domain_createA_ptr 0 +#define __itt_domain_createW(name) (__itt_domain*)0 +#define __itt_domain_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_domain_create(name) (__itt_domain*)0 +#define __itt_domain_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_domain_createA_ptr 0 +#define __itt_domain_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_domain_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} domains group */ + +/** + * @defgroup ids IDs + * @ingroup public + * IDs group + * @{ + */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_id +{ + unsigned long long d1, d2, d3; +} __itt_id; + +#pragma pack(pop) +/** @endcond */ + +const __itt_id __itt_null = { 0, 0, 0 }; + +/** + * @ingroup ids + * @brief A convenience function is provided to create an ID without domain control. + * @brief This is a convenience function to initialize an __itt_id structure. This function + * does not affect the collector runtime in any way. After you make the ID with this + * function, you still must create it with the __itt_id_create function before using the ID + * to identify a named entity. + * @param[in] addr The address of object; high QWORD of the ID value. + * @param[in] extra The extra data to unique identify object; low QWORD of the ID value. 
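+ *
+ * Illustrative sketch (editorial addition): make an ID from an application
+ * pointer, register it, and release it when done. "domain" and "obj" are
+ * assumed to exist in the caller.
+ * @code
+ *     __itt_id id = __itt_id_make(obj, 0);
+ *     __itt_id_create(domain, id);    /* start the ID's lifetime */
+ *     /* ... use id with __itt_task_begin, relations, metadata ... */
+ *     __itt_id_destroy(domain, id);   /* end the ID's lifetime */
+ * @endcode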
+ */ + +ITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) ITT_INLINE_ATTRIBUTE; +ITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) +{ + __itt_id id = __itt_null; + id.d1 = (unsigned long long)((uintptr_t)addr); + id.d2 = (unsigned long long)extra; + id.d3 = (unsigned long long)0; /* Reserved. Must be zero */ + return id; +} + +/** + * @ingroup ids + * @brief Create an instance of identifier. + * This establishes the beginning of the lifetime of an instance of + * the given ID in the trace. Once this lifetime starts, the ID + * can be used to tag named entity instances in calls such as + * __itt_task_begin, and to specify relationships among + * identified named entity instances, using the \ref relations APIs. + * Instance IDs are not domain specific! + * @param[in] domain The domain controlling the execution of this call. + * @param[in] id The ID to create. + */ +void ITTAPI __itt_id_create(const __itt_domain *domain, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id)) +#define __itt_id_create(d,x) ITTNOTIFY_VOID_D1(id_create,d,x) +#define __itt_id_create_ptr ITTNOTIFY_NAME(id_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_id_create(domain,id) +#define __itt_id_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_id_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup ids + * @brief Destroy an instance of identifier. + * This ends the lifetime of the current instance of the given ID value in the trace. + * Any relationships that are established after this lifetime ends are invalid. + * This call must be performed before the given ID value can be reused for a different + * named entity instance. + * @param[in] domain The domain controlling the execution of this call. + * @param[in] id The ID to destroy. + */ +void ITTAPI __itt_id_destroy(const __itt_domain *domain, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, id_destroy, (const __itt_domain *domain, __itt_id id)) +#define __itt_id_destroy(d,x) ITTNOTIFY_VOID_D1(id_destroy,d,x) +#define __itt_id_destroy_ptr ITTNOTIFY_NAME(id_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_id_destroy(domain,id) +#define __itt_id_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_id_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} ids group */ + +/** + * @defgroup handless String Handles + * @ingroup public + * String Handles group + * @{ + */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_string_handle +{ + const char* strA; /*!< Copy of original string in ASCII. */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* strW; /*!< Copy of original string in UNICODE. */ +#else /* UNICODE || _UNICODE */ + void* strW; +#endif /* UNICODE || _UNICODE */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_string_handle* next; +} __itt_string_handle; + +#pragma pack(pop) +/** @endcond */ + +/** + * @ingroup handles + * @brief Create a string handle. + * Create and return handle value that can be associated with a string. 
+ * Consecutive calls to __itt_string_handle_create with the same name + * return the same value. Because the set of string handles is expected to remain + * static during the application's execution time, there is no mechanism to destroy a string handle. + * Any string handle can be accessed by any thread in the process, regardless of which thread created + * the string handle. This call is thread-safe. + * @param[in] name The input string + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_string_handle* ITTAPI __itt_string_handle_createA(const char *name); +__itt_string_handle* ITTAPI __itt_string_handle_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_string_handle_create __itt_string_handle_createW +# define __itt_string_handle_create_ptr __itt_string_handle_createW_ptr +#else /* UNICODE */ +# define __itt_string_handle_create __itt_string_handle_createA +# define __itt_string_handle_create_ptr __itt_string_handle_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_string_handle* ITTAPI __itt_string_handle_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_string_handle_createA ITTNOTIFY_DATA(string_handle_createA) +#define __itt_string_handle_createA_ptr ITTNOTIFY_NAME(string_handle_createA) +#define __itt_string_handle_createW ITTNOTIFY_DATA(string_handle_createW) +#define __itt_string_handle_createW_ptr ITTNOTIFY_NAME(string_handle_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_string_handle_create ITTNOTIFY_DATA(string_handle_create) +#define __itt_string_handle_create_ptr ITTNOTIFY_NAME(string_handle_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_string_handle_createA(name) (__itt_string_handle*)0 +#define __itt_string_handle_createA_ptr 0 +#define __itt_string_handle_createW(name) (__itt_string_handle*)0 +#define __itt_string_handle_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_string_handle_create(name) (__itt_string_handle*)0 +#define __itt_string_handle_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_string_handle_createA_ptr 0 +#define __itt_string_handle_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_string_handle_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} handles group */ + +/** @cond exclude_from_documentation */ +typedef unsigned long long __itt_timestamp; +/** @endcond */ + +#define __itt_timestamp_none ((__itt_timestamp)-1LL) + +/** @cond exclude_from_gpa_documentation */ + +/** + * @ingroup timestamps + * @brief Return timestamp corresponding to the current moment. 
+ * This returns the timestamp in the format that is the most relevant for the current + * host or platform (RDTSC, QPC, and others). You can use the "<" operator to + * compare __itt_timestamp values. + */ +__itt_timestamp ITTAPI __itt_get_timestamp(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_timestamp, get_timestamp, (void)) +#define __itt_get_timestamp ITTNOTIFY_DATA(get_timestamp) +#define __itt_get_timestamp_ptr ITTNOTIFY_NAME(get_timestamp) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_get_timestamp() +#define __itt_get_timestamp_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_get_timestamp_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} timestamps */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** + * @defgroup regions Regions + * @ingroup public + * Regions group + * @{ + */ +/** + * @ingroup regions + * @brief Begin of region instance. + * Successive calls to __itt_region_begin with the same ID are ignored + * until a call to __itt_region_end with the same ID + * @param[in] domain The domain for this region instance + * @param[in] id The instance ID for this region instance. Must not be __itt_null + * @param[in] parentid The instance ID for the parent of this region instance, or __itt_null + * @param[in] name The name of this region + */ +void ITTAPI __itt_region_begin(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name); + +/** + * @ingroup regions + * @brief End of region instance. + * The first call to __itt_region_end with a given ID ends the + * region. Successive calls with the same ID are ignored, as are + * calls that do not have a matching __itt_region_begin call. + * @param[in] domain The domain for this region instance + * @param[in] id The instance ID for this region instance + */ +void ITTAPI __itt_region_end(const __itt_domain *domain, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, region_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, region_end, (const __itt_domain *domain, __itt_id id)) +#define __itt_region_begin(d,x,y,z) ITTNOTIFY_VOID_D3(region_begin,d,x,y,z) +#define __itt_region_begin_ptr ITTNOTIFY_NAME(region_begin) +#define __itt_region_end(d,x) ITTNOTIFY_VOID_D1(region_end,d,x) +#define __itt_region_end_ptr ITTNOTIFY_NAME(region_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_region_begin(d,x,y,z) +#define __itt_region_begin_ptr 0 +#define __itt_region_end(d,x) +#define __itt_region_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_region_begin_ptr 0 +#define __itt_region_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} regions group */ + +/** + * @defgroup frames Frames + * @ingroup public + * Frames are similar to regions, but are intended to be easier to use and to implement. + * In particular: + * - Frames always represent periods of elapsed time + * - By default, frames have no nesting relationships + * @{ + */ + +/** + * @ingroup frames + * @brief Begin a frame instance. + * Successive calls to __itt_frame_begin with the + * same ID are ignored until a call to __itt_frame_end with the same ID. 
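+ *
+ * Illustrative sketch (editorial addition); render_one_frame() is a
+ * hypothetical per-frame workload:
+ * @code
+ *     __itt_frame_begin_v3(domain, NULL);
+ *     render_one_frame();
+ *     __itt_frame_end_v3(domain, NULL);
+ * @endcode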
+ * @param[in] domain The domain for this frame instance + * @param[in] id The instance ID for this frame instance or NULL + */ +void ITTAPI __itt_frame_begin_v3(const __itt_domain *domain, __itt_id *id); + +/** + * @ingroup frames + * @brief End a frame instance. + * The first call to __itt_frame_end with a given ID + * ends the frame. Successive calls with the same ID are ignored, as are + * calls that do not have a matching __itt_frame_begin call. + * @param[in] domain The domain for this frame instance + * @param[in] id The instance ID for this frame instance or NULL for current + */ +void ITTAPI __itt_frame_end_v3(const __itt_domain *domain, __itt_id *id); + +/** + * @ingroup frames + * @brief Submits a frame instance. + * Successive calls to __itt_frame_begin or __itt_frame_submit with the + * same ID are ignored until a call to __itt_frame_end or __itt_frame_submit + * with the same ID. + * Passing special __itt_timestamp_none value as "end" argument means + * take the current timestamp as the end timestamp. + * @param[in] domain The domain for this frame instance + * @param[in] id The instance ID for this frame instance or NULL + * @param[in] begin Timestamp of the beginning of the frame + * @param[in] end Timestamp of the end of the frame + */ +void ITTAPI __itt_frame_submit_v3(const __itt_domain *domain, __itt_id *id, + __itt_timestamp begin, __itt_timestamp end); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, frame_begin_v3, (const __itt_domain *domain, __itt_id *id)) +ITT_STUBV(ITTAPI, void, frame_end_v3, (const __itt_domain *domain, __itt_id *id)) +ITT_STUBV(ITTAPI, void, frame_submit_v3, (const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end)) +#define __itt_frame_begin_v3(d,x) ITTNOTIFY_VOID_D1(frame_begin_v3,d,x) +#define __itt_frame_begin_v3_ptr ITTNOTIFY_NAME(frame_begin_v3) +#define __itt_frame_end_v3(d,x) ITTNOTIFY_VOID_D1(frame_end_v3,d,x) +#define __itt_frame_end_v3_ptr ITTNOTIFY_NAME(frame_end_v3) +#define __itt_frame_submit_v3(d,x,b,e) ITTNOTIFY_VOID_D3(frame_submit_v3,d,x,b,e) +#define __itt_frame_submit_v3_ptr ITTNOTIFY_NAME(frame_submit_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_frame_begin_v3(domain,id) +#define __itt_frame_begin_v3_ptr 0 +#define __itt_frame_end_v3(domain,id) +#define __itt_frame_end_v3_ptr 0 +#define __itt_frame_submit_v3(domain,id,begin,end) +#define __itt_frame_submit_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_frame_begin_v3_ptr 0 +#define __itt_frame_end_v3_ptr 0 +#define __itt_frame_submit_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} frames group */ +/** @endcond */ + +/** + * @defgroup taskgroup Task Group + * @ingroup public + * Task Group + * @{ + */ +/** + * @ingroup task_groups + * @brief Denotes a task_group instance. + * Successive calls to __itt_task_group with the same ID are ignored. + * @param[in] domain The domain for this task_group instance + * @param[in] id The instance ID for this task_group instance. Must not be __itt_null. + * @param[in] parentid The instance ID for the parent of this task_group instance, or __itt_null. 
+ * @param[in] name The name of this task_group + */ +void ITTAPI __itt_task_group(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_group, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +#define __itt_task_group(d,x,y,z) ITTNOTIFY_VOID_D3(task_group,d,x,y,z) +#define __itt_task_group_ptr ITTNOTIFY_NAME(task_group) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_group(d,x,y,z) +#define __itt_task_group_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_group_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} taskgroup group */ + +/** + * @defgroup tasks Tasks + * @ingroup public + * A task instance represents a piece of work performed by a particular + * thread for a period of time. A call to __itt_task_begin creates a + * task instance. This becomes the current instance for that task on that + * thread. A following call to __itt_task_end on the same thread ends the + * instance. There may be multiple simultaneous instances of tasks with the + * same name on different threads. If an ID is specified, the task instance + * receives that ID. Nested tasks are allowed. + * + * Note: The task is defined by the bracketing of __itt_task_begin and + * __itt_task_end on the same thread. If some scheduling mechanism causes + * task switching (the thread executes a different user task) or task + * switching (the user task switches to a different thread) then this breaks + * the notion of current instance. Additional API calls are required to + * deal with that possibility. + * @{ + */ + +/** + * @ingroup tasks + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] taskid The instance ID for this task instance, or __itt_null + * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null + * @param[in] name The name of this task + */ +void ITTAPI __itt_task_begin(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name); + +/** + * @ingroup tasks + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] taskid The identifier for this task instance (may be 0) + * @param[in] parentid The parent of this task (may be 0) + * @param[in] fn The pointer to the function you are tracing + */ +void ITTAPI __itt_task_begin_fn(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, void* fn); + +/** + * @ingroup tasks + * @brief End the current task instance. + * @param[in] domain The domain for this task + */ +void ITTAPI __itt_task_end(const __itt_domain *domain); + +/** + * @ingroup tasks + * @brief Begin an overlapped task instance. + * @param[in] domain The domain for this task. + * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null. + * @param[in] parentid The parent of this task, or __itt_null. + * @param[in] name The name of this task. + */ +void ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup tasks + * @brief End an overlapped task instance. 
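+ *
+ * Illustrative sketch (editorial addition): two overlapped task instances on
+ * the same thread, ended via their explicit IDs. "req1"/"req2" and the string
+ * handles name1/name2 are hypothetical.
+ * @code
+ *     __itt_id t1 = __itt_id_make(&req1, 0);
+ *     __itt_id t2 = __itt_id_make(&req2, 0);
+ *     __itt_task_begin_overlapped(domain, t1, __itt_null, name1);
+ *     __itt_task_begin_overlapped(domain, t2, __itt_null, name2);
+ *     __itt_task_end_overlapped(domain, t1);
+ *     __itt_task_end_overlapped(domain, t2);
+ * @endcode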
+ * @param[in] domain The domain for this task
+ * @param[in] taskid Explicit ID of finished task
+ */
+void ITTAPI __itt_task_end_overlapped(const __itt_domain *domain, __itt_id taskid);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, task_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name))
+ITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parentid, void* fn))
+ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain))
+ITT_STUBV(ITTAPI, void, task_begin_overlapped, (const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name))
+ITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id taskid))
+#define __itt_task_begin(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin,d,x,y,z)
+#define __itt_task_begin_ptr ITTNOTIFY_NAME(task_begin)
+#define __itt_task_begin_fn(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_fn,d,x,y,z)
+#define __itt_task_begin_fn_ptr ITTNOTIFY_NAME(task_begin_fn)
+#define __itt_task_end(d) ITTNOTIFY_VOID_D0(task_end,d)
+#define __itt_task_end_ptr ITTNOTIFY_NAME(task_end)
+#define __itt_task_begin_overlapped(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_overlapped,d,x,y,z)
+#define __itt_task_begin_overlapped_ptr ITTNOTIFY_NAME(task_begin_overlapped)
+#define __itt_task_end_overlapped(d,x) ITTNOTIFY_VOID_D1(task_end_overlapped,d,x)
+#define __itt_task_end_overlapped_ptr ITTNOTIFY_NAME(task_end_overlapped)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_task_begin(domain,id,parentid,name)
+#define __itt_task_begin_ptr 0
+#define __itt_task_begin_fn(domain,id,parentid,fn)
+#define __itt_task_begin_fn_ptr 0
+#define __itt_task_end(domain)
+#define __itt_task_end_ptr 0
+#define __itt_task_begin_overlapped(domain,taskid,parentid,name)
+#define __itt_task_begin_overlapped_ptr 0
+#define __itt_task_end_overlapped(domain,taskid)
+#define __itt_task_end_overlapped_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_task_begin_ptr 0
+#define __itt_task_begin_fn_ptr 0
+#define __itt_task_end_ptr 0
+#define __itt_task_begin_overlapped_ptr 0
+#define __itt_task_end_overlapped_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} tasks group */
+
+
+/**
+ * @defgroup markers Markers
+ * Markers represent a single discrete event in time. Markers have a scope,
+ * described by an enumerated type __itt_scope. Markers are created by
+ * the API call __itt_marker. A marker instance can be given an ID for use in
+ * adding metadata.
+ * @{
+ */
+
+/**
+ * @brief Describes the scope of an event object in the trace.
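+ *
+ * Illustrative sketch (editorial addition): emitting a task-scoped marker;
+ * the marker name is hypothetical.
+ * @code
+ *     __itt_marker(domain, __itt_null,
+ *                  __itt_string_handle_create("checkpoint"), __itt_scope_task);
+ * @endcode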
+ */
+typedef enum
+{
+    __itt_scope_unknown = 0,
+    __itt_scope_global,
+    __itt_scope_track_group,
+    __itt_scope_track,
+    __itt_scope_task,
+    __itt_scope_marker
+} __itt_scope;
+
+/** @cond exclude_from_documentation */
+#define __itt_marker_scope_unknown __itt_scope_unknown
+#define __itt_marker_scope_global __itt_scope_global
+#define __itt_marker_scope_process __itt_scope_track_group
+#define __itt_marker_scope_thread __itt_scope_track
+#define __itt_marker_scope_task __itt_scope_task
+/** @endcond */
+
+/**
+ * @ingroup markers
+ * @brief Create a marker instance
+ * @param[in] domain The domain for this marker
+ * @param[in] id The instance ID for this marker or __itt_null
+ * @param[in] name The name for this marker
+ * @param[in] scope The scope for this marker
+ */
+void ITTAPI __itt_marker(const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope))
+#define __itt_marker(d,x,y,z) ITTNOTIFY_VOID_D3(marker,d,x,y,z)
+#define __itt_marker_ptr ITTNOTIFY_NAME(marker)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_marker(domain,id,name,scope)
+#define __itt_marker_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_marker_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} markers group */
+
+/**
+ * @defgroup metadata Metadata
+ * The metadata API is used to attach extra information to named
+ * entities. Metadata can be attached to an identified named entity by ID,
+ * or to the current entity (which is always a task).
+ *
+ * Conceptually metadata has a type (what kind of metadata), a key (the
+ * name of the metadata), and a value (the actual data). The encoding of
+ * the value depends on the type of the metadata.
+ *
+ * The type of metadata is specified by an enumerated type __itt_metadata_type.
+ * @{
+ */
+
+/**
+ * @ingroup parameters
+ * @brief Describes the type of metadata
+ */
+typedef enum {
+    __itt_metadata_unknown = 0,
+    __itt_metadata_u64, /**< Unsigned 64-bit integer */
+    __itt_metadata_s64, /**< Signed 64-bit integer */
+    __itt_metadata_u32, /**< Unsigned 32-bit integer */
+    __itt_metadata_s32, /**< Signed 32-bit integer */
+    __itt_metadata_u16, /**< Unsigned 16-bit integer */
+    __itt_metadata_s16, /**< Signed 16-bit integer */
+    __itt_metadata_float, /**< Signed 32-bit floating-point */
+    __itt_metadata_double /**< Signed 64-bit floating-point */
+} __itt_metadata_type;
+
+/**
+ * @ingroup parameters
+ * @brief Add metadata to an instance of a named entity.
+ * @param[in] domain The domain controlling the call
+ * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task
+ * @param[in] key The name of the metadata
+ * @param[in] type The type of the metadata
+ * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added.
+ * @param[in] data The metadata itself +*/ +void ITTAPI __itt_metadata_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data)) +#define __itt_metadata_add(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add,d,x,y,z,a,b) +#define __itt_metadata_add_ptr ITTNOTIFY_NAME(metadata_add) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_metadata_add(d,x,y,z,a,b) +#define __itt_metadata_add_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_metadata_add_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup parameters + * @brief Add string metadata to an instance of a named entity. + * @param[in] domain The domain controlling the call + * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task + * @param[in] key The name of the metadata + * @param[in] data The metadata itself + * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_metadata_str_addA(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); +void ITTAPI __itt_metadata_str_addW(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_metadata_str_add __itt_metadata_str_addW +# define __itt_metadata_str_add_ptr __itt_metadata_str_addW_ptr +#else /* UNICODE */ +# define __itt_metadata_str_add __itt_metadata_str_addA +# define __itt_metadata_str_add_ptr __itt_metadata_str_addA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_metadata_str_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); +#endif + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, metadata_str_addA, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length)) +ITT_STUBV(ITTAPI, void, metadata_str_addW, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_metadata_str_addA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addA,d,x,y,z,a) +#define __itt_metadata_str_addA_ptr ITTNOTIFY_NAME(metadata_str_addA) +#define __itt_metadata_str_addW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addW,d,x,y,z,a) +#define __itt_metadata_str_addW_ptr ITTNOTIFY_NAME(metadata_str_addW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add,d,x,y,z,a) +#define __itt_metadata_str_add_ptr ITTNOTIFY_NAME(metadata_str_add) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN 
+#define __itt_metadata_str_addA(d,x,y,z,a)
+#define __itt_metadata_str_addA_ptr 0
+#define __itt_metadata_str_addW(d,x,y,z,a)
+#define __itt_metadata_str_addW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_metadata_str_add(d,x,y,z,a)
+#define __itt_metadata_str_add_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_metadata_str_addA_ptr 0
+#define __itt_metadata_str_addW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_metadata_str_add_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @ingroup parameters
+ * @brief Add metadata to an instance of a named entity.
+ * @param[in] domain The domain controlling the call
+ * @param[in] scope The scope of the instance to which the metadata is to be added
+ * @param[in] key The name of the metadata
+ * @param[in] type The type of the metadata
+ * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added.
+ * @param[in] data The metadata itself
+*/
+void ITTAPI __itt_metadata_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data))
+#define __itt_metadata_add_with_scope(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add_with_scope,d,x,y,z,a,b)
+#define __itt_metadata_add_with_scope_ptr ITTNOTIFY_NAME(metadata_add_with_scope)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_metadata_add_with_scope(d,x,y,z,a,b)
+#define __itt_metadata_add_with_scope_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_metadata_add_with_scope_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @ingroup parameters
+ * @brief Add string metadata to an instance of a named entity.
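+ *
+ * Illustrative sketch (editorial addition): attach a string value to the
+ * current task; the key and value shown are hypothetical.
+ * @code
+ *     __itt_string_handle* key = __itt_string_handle_create("query");
+ *     __itt_metadata_str_add_with_scope(domain, __itt_scope_task, key,
+ *                                       "SELECT 1", 8);
+ * @endcode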
+ * @param[in] domain The domain controlling the call
+ * @param[in] scope The scope of the instance to which the metadata is to be added
+ * @param[in] key The name of the metadata
+ * @param[in] data The metadata itself
+ * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated
+*/
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+void ITTAPI __itt_metadata_str_add_with_scopeA(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length);
+void ITTAPI __itt_metadata_str_add_with_scopeW(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length);
+#if defined(UNICODE) || defined(_UNICODE)
+# define __itt_metadata_str_add_with_scope __itt_metadata_str_add_with_scopeW
+# define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeW_ptr
+#else /* UNICODE */
+# define __itt_metadata_str_add_with_scope __itt_metadata_str_add_with_scopeA
+# define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+void ITTAPI __itt_metadata_str_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length);
+#endif
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeA, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length))
+ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeW, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length))
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, metadata_str_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeA,d,x,y,z,a)
+#define __itt_metadata_str_add_with_scopeA_ptr ITTNOTIFY_NAME(metadata_str_add_with_scopeA)
+#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeW,d,x,y,z,a)
+#define __itt_metadata_str_add_with_scopeW_ptr ITTNOTIFY_NAME(metadata_str_add_with_scopeW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_metadata_str_add_with_scope(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scope,d,x,y,z,a)
+#define __itt_metadata_str_add_with_scope_ptr ITTNOTIFY_NAME(metadata_str_add_with_scope)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a)
+#define __itt_metadata_str_add_with_scopeA_ptr 0
+#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a)
+#define __itt_metadata_str_add_with_scopeW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_metadata_str_add_with_scope(d,x,y,z,a)
+#define __itt_metadata_str_add_with_scope_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_metadata_str_add_with_scopeA_ptr 0
+#define __itt_metadata_str_add_with_scopeW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_metadata_str_add_with_scope_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} metadata group */ + +/** + * @defgroup relations Relations + * Instances of named entities can be explicitly associated with other + * instances using instance IDs and the relationship API calls. + * + * @{ + */ + +/** + * @ingroup relations + * @brief The kind of relation between two instances is specified by the enumerated type __itt_relation. + * Relations between instances can be added with an API call. The relation + * API uses instance IDs. Relations can be added before or after the actual + * instances are created and persist independently of the instances. This + * is the motivation for having different lifetimes for instance IDs and + * the actual instances. + */ +typedef enum +{ + __itt_relation_is_unknown = 0, + __itt_relation_is_dependent_on, /**< "A is dependent on B" means that A cannot start until B completes */ + __itt_relation_is_sibling_of, /**< "A is sibling of B" means that A and B were created as a group */ + __itt_relation_is_parent_of, /**< "A is parent of B" means that A created B */ + __itt_relation_is_continuation_of, /**< "A is continuation of B" means that A assumes the dependencies of B */ + __itt_relation_is_child_of, /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */ + __itt_relation_is_continued_by, /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */ + __itt_relation_is_predecessor_to /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */ +} __itt_relation; + +/** + * @ingroup relations + * @brief Add a relation to the current task instance. + * The current task instance is the head of the relation. + * @param[in] domain The domain controlling this call + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_to_current(const __itt_domain *domain, __itt_relation relation, __itt_id tail); + +/** + * @ingroup relations + * @brief Add a relation between two instance identifiers. 
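+ *
+ * Illustrative sketch (editorial addition): record that one identified task
+ * cannot start until another completes; id_a and id_b are hypothetical IDs
+ * registered earlier with __itt_id_create.
+ * @code
+ *     __itt_relation_add(domain, id_b, __itt_relation_is_dependent_on, id_a);
+ * @endcode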
+ * @param[in] domain The domain controlling this call + * @param[in] head The ID for the head of the relation + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add(const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, relation_add_to_current, (const __itt_domain *domain, __itt_relation relation, __itt_id tail)) +ITT_STUBV(ITTAPI, void, relation_add, (const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail)) +#define __itt_relation_add_to_current(d,x,y) ITTNOTIFY_VOID_D2(relation_add_to_current,d,x,y) +#define __itt_relation_add_to_current_ptr ITTNOTIFY_NAME(relation_add_to_current) +#define __itt_relation_add(d,x,y,z) ITTNOTIFY_VOID_D3(relation_add,d,x,y,z) +#define __itt_relation_add_ptr ITTNOTIFY_NAME(relation_add) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_relation_add_to_current(d,x,y) +#define __itt_relation_add_to_current_ptr 0 +#define __itt_relation_add(d,x,y,z) +#define __itt_relation_add_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_relation_add_to_current_ptr 0 +#define __itt_relation_add_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} relations group */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_clock_info +{ + unsigned long long clock_freq; /*!< Clock domain frequency */ + unsigned long long clock_base; /*!< Clock domain base timestamp */ +} __itt_clock_info; + +#pragma pack(pop) +/** @endcond */ + +/** @cond exclude_from_documentation */ +typedef void (ITTAPI *__itt_get_clock_info_fn)(__itt_clock_info* clock_info, void* data); +/** @endcond */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_clock_domain +{ + __itt_clock_info info; /*!< Most recent clock domain info */ + __itt_get_clock_info_fn fn; /*!< Callback function pointer */ + void* fn_data; /*!< Input argument for the callback function */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_clock_domain* next; +} __itt_clock_domain; + +#pragma pack(pop) +/** @endcond */ + +/** + * @ingroup clockdomains + * @brief Create a clock domain. + * Certain applications require the capability to trace their application using + * a clock domain different than the CPU, for instance the instrumentation of events + * that occur on a GPU. + * Because the set of domains is expected to be static over the application's execution time, + * there is no mechanism to destroy a domain. + * Any domain can be accessed by any thread in the process, regardless of which thread created + * the domain. This call is thread-safe. 
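+ *
+ * Illustrative sketch (editorial addition): a callback that reports a
+ * hypothetical 1 GHz device timer, then used to create a clock domain.
+ * read_device_timer() is assumed, not part of this API.
+ * @code
+ *     static void ITTAPI device_clock_info(__itt_clock_info* clock_info, void* data)
+ *     {
+ *         (void)data;
+ *         clock_info->clock_freq = 1000000000ULL;
+ *         clock_info->clock_base = read_device_timer();
+ *     }
+ *
+ *     __itt_clock_domain* device_clock =
+ *         __itt_clock_domain_create(device_clock_info, NULL);
+ * @endcode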
+ * @param[in] fn A pointer to a callback function which retrieves alternative CPU timestamps
+ * @param[in] fn_data Argument for a callback function; may be NULL
+ */
+__itt_clock_domain* ITTAPI __itt_clock_domain_create(__itt_get_clock_info_fn fn, void* fn_data);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info_fn fn, void* fn_data))
+#define __itt_clock_domain_create ITTNOTIFY_DATA(clock_domain_create)
+#define __itt_clock_domain_create_ptr ITTNOTIFY_NAME(clock_domain_create)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_clock_domain_create(fn,fn_data) (__itt_clock_domain*)0
+#define __itt_clock_domain_create_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_clock_domain_create_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @ingroup clockdomains
+ * @brief Recalculate clock domain frequencies and clock base timestamps.
+ */
+void ITTAPI __itt_clock_domain_reset(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, clock_domain_reset, (void))
+#define __itt_clock_domain_reset ITTNOTIFY_VOID(clock_domain_reset)
+#define __itt_clock_domain_reset_ptr ITTNOTIFY_NAME(clock_domain_reset)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_clock_domain_reset()
+#define __itt_clock_domain_reset_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_clock_domain_reset_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @ingroup clockdomain
+ * @brief Create an instance of identifier. This establishes the beginning of the lifetime of
+ * an instance of the given ID in the trace. Once this lifetime starts, the ID can be used to
+ * tag named entity instances in calls such as __itt_task_begin, and to specify relationships among
+ * identified named entity instances, using the \ref relations APIs.
+ * @param[in] domain The domain controlling the execution of this call.
+ * @param[in] clock_domain The clock domain controlling the execution of this call.
+ * @param[in] timestamp The user defined timestamp.
+ * @param[in] id The ID to create.
+ */
+void ITTAPI __itt_id_create_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id);
+
+/**
+ * @ingroup clockdomain
+ * @brief Destroy an instance of identifier. This ends the lifetime of the current instance of the
+ * given ID value in the trace. Any relationships that are established after this lifetime ends are
+ * invalid. This call must be performed before the given ID value can be reused for a different
+ * named entity instance.
+ * @param[in] domain The domain controlling the execution of this call.
+ * @param[in] clock_domain The clock domain controlling the execution of this call.
+ * @param[in] timestamp The user defined timestamp.
+ * @param[in] id The ID to destroy.
+ */ +void ITTAPI __itt_id_destroy_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, id_create_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id)) +ITT_STUBV(ITTAPI, void, id_destroy_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id)) +#define __itt_id_create_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_create_ex,d,x,y,z) +#define __itt_id_create_ex_ptr ITTNOTIFY_NAME(id_create_ex) +#define __itt_id_destroy_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_destroy_ex,d,x,y,z) +#define __itt_id_destroy_ex_ptr ITTNOTIFY_NAME(id_destroy_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_id_create_ex(domain,clock_domain,timestamp,id) +#define __itt_id_create_ex_ptr 0 +#define __itt_id_destroy_ex(domain,clock_domain,timestamp,id) +#define __itt_id_destroy_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_id_create_ex_ptr 0 +#define __itt_id_destroy_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomain + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] taskid The instance ID for this task instance, or __itt_null + * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null + * @param[in] name The name of this task + */ +void ITTAPI __itt_task_begin_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup clockdomain + * @brief Begin a task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] taskid The identifier for this task instance, or __itt_null + * @param[in] parentid The parent of this task, or __itt_null + * @param[in] fn The pointer to the function you are tracing + */ +void ITTAPI __itt_task_begin_fn_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, void* fn); + +/** + * @ingroup clockdomain + * @brief End the current task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
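+ *
+ * Illustrative sketch (editorial addition): bracketing device-side work with
+ * user-supplied timestamps in a previously created clock domain. The
+ * device_clock handle, the timestamps and the string handle are hypothetical.
+ * @code
+ *     __itt_task_begin_ex(domain, device_clock, t_start,
+ *                         __itt_null, __itt_null, name);
+ *     __itt_task_end_ex(domain, device_clock, t_end);
+ * @endcode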
+ */ +void ITTAPI __itt_task_end_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_begin_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, task_begin_fn_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, void* fn)) +ITT_STUBV(ITTAPI, void, task_end_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp)) +#define __itt_task_begin_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_ex,d,x,y,z,a,b) +#define __itt_task_begin_ex_ptr ITTNOTIFY_NAME(task_begin_ex) +#define __itt_task_begin_fn_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_fn_ex,d,x,y,z,a,b) +#define __itt_task_begin_fn_ex_ptr ITTNOTIFY_NAME(task_begin_fn_ex) +#define __itt_task_end_ex(d,x,y) ITTNOTIFY_VOID_D2(task_end_ex,d,x,y) +#define __itt_task_end_ex_ptr ITTNOTIFY_NAME(task_end_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_begin_ex(domain,clock_domain,timestamp,id,parentid,name) +#define __itt_task_begin_ex_ptr 0 +#define __itt_task_begin_fn_ex(domain,clock_domain,timestamp,id,parentid,fn) +#define __itt_task_begin_fn_ex_ptr 0 +#define __itt_task_end_ex(domain,clock_domain,timestamp) +#define __itt_task_end_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_begin_ex_ptr 0 +#define __itt_task_begin_fn_ex_ptr 0 +#define __itt_task_end_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @defgroup counters Counters + * @ingroup public + * Counters are user-defined objects with a monotonically increasing + * value. Counter values are 64-bit unsigned integers. + * Counters have names that can be displayed in + * the tools. 
+ * @{ + */ + +/** + * @brief opaque structure for counter identification + */ +/** @cond exclude_from_documentation */ + +typedef struct ___itt_counter* __itt_counter; + +/** + * @brief Create an unsigned 64 bits integer counter with given name/domain + * + * After __itt_counter_create() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), + * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) + * can be used to change the value of the counter, where value_ptr is a pointer to an unsigned 64 bits integer + * + * The call is equal to __itt_counter_create_typed(name, domain, __itt_metadata_u64) + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_createA(const char *name, const char *domain); +__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_counter_create __itt_counter_createW +# define __itt_counter_create_ptr __itt_counter_createW_ptr +#else /* UNICODE */ +# define __itt_counter_create __itt_counter_createA +# define __itt_counter_create_ptr __itt_counter_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain)) +ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA ITTNOTIFY_DATA(counter_createA) +#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA) +#define __itt_counter_createW ITTNOTIFY_DATA(counter_createW) +#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create ITTNOTIFY_DATA(counter_create) +#define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA(name, domain) +#define __itt_counter_createA_ptr 0 +#define __itt_counter_createW(name, domain) +#define __itt_counter_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create(name, domain) +#define __itt_counter_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA_ptr 0 +#define __itt_counter_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Increment the unsigned 64 bits integer counter value + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_inc(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id)) +#define __itt_counter_inc ITTNOTIFY_VOID(counter_inc) +#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc) 
+#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc(id) +#define __itt_counter_inc_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** + * @brief Increment the unsigned 64 bits integer counter value with x + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value)) +#define __itt_counter_inc_delta ITTNOTIFY_VOID(counter_inc_delta) +#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc_delta(id, value) +#define __itt_counter_inc_delta_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_delta_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Decrement the unsigned 64 bits integer counter value + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_dec(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec, (__itt_counter id)) +#define __itt_counter_dec ITTNOTIFY_VOID(counter_dec) +#define __itt_counter_dec_ptr ITTNOTIFY_NAME(counter_dec) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec(id) +#define __itt_counter_dec_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** + * @brief Decrement the unsigned 64 bits integer counter value with x + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_dec_delta(__itt_counter id, unsigned long long value); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec_delta, (__itt_counter id, unsigned long long value)) +#define __itt_counter_dec_delta ITTNOTIFY_VOID(counter_dec_delta) +#define __itt_counter_dec_delta_ptr ITTNOTIFY_NAME(counter_dec_delta) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec_delta(id, value) +#define __itt_counter_dec_delta_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_delta_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup counters + * @brief Increment a counter by one. + * The first call with a given name creates a counter by that name and sets its + * value to zero. Successive calls increment the counter value. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + */ +void ITTAPI __itt_counter_inc_v3(const __itt_domain *domain, __itt_string_handle *name); + +/** + * @ingroup counters + * @brief Increment a counter by the value specified in delta. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. 
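/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of the handle-based counter calls above: create the
 * unsigned 64-bit counters once (narrow-string form assumed), then update
 * them from the measured code path. The "MyApp" domain string and the
 * counter names are placeholders.
 */
#include "ittnotify.h"

static __itt_counter g_frames;      /* number of frames processed      */
static __itt_counter g_bytes_sent;  /* running total of bytes written  */

static void example_counters_init(void)
{
    g_frames     = __itt_counter_create("frames", "MyApp");
    g_bytes_sent = __itt_counter_create("bytes_sent", "MyApp");
}

static void example_counters_update(unsigned long long bytes)
{
    __itt_counter_inc(g_frames);                   /* add exactly one        */
    __itt_counter_inc_delta(g_bytes_sent, bytes);  /* add an arbitrary delta */
}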
+ * @param[in] name The name of the counter + * @param[in] delta The amount by which to increment the counter + */ +void ITTAPI __itt_counter_inc_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) +#define __itt_counter_inc_v3(d,x) ITTNOTIFY_VOID_D1(counter_inc_v3,d,x) +#define __itt_counter_inc_v3_ptr ITTNOTIFY_NAME(counter_inc_v3) +#define __itt_counter_inc_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_inc_delta_v3,d,x,y) +#define __itt_counter_inc_delta_v3_ptr ITTNOTIFY_NAME(counter_inc_delta_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc_v3(domain,name) +#define __itt_counter_inc_v3_ptr 0 +#define __itt_counter_inc_delta_v3(domain,name,delta) +#define __itt_counter_inc_delta_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_v3_ptr 0 +#define __itt_counter_inc_delta_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + + +/** + * @ingroup counters + * @brief Decrement a counter by one. + * The first call with a given name creates a counter by that name and sets its + * value to zero. Successive calls decrement the counter value. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + */ +void ITTAPI __itt_counter_dec_v3(const __itt_domain *domain, __itt_string_handle *name); + +/** + * @ingroup counters + * @brief Decrement a counter by the value specified in delta. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. 
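/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of the name-based (_v3) counter calls, which identify
 * the counter by a domain plus string handle instead of a handle returned by
 * __itt_counter_create(). __itt_domain_create()/__itt_string_handle_create()
 * (narrow-string forms) are assumed from earlier in this header.
 */
#include "ittnotify.h"

static void example_counters_v3(unsigned long long finished_jobs)
{
    const __itt_domain*  domain = __itt_domain_create("MyApp");
    __itt_string_handle* queued = __itt_string_handle_create("jobs_queued");

    __itt_counter_inc_v3(domain, queued);                      /* one job queued     */
    __itt_counter_dec_delta_v3(domain, queued, finished_jobs); /* batch of jobs done */
}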
+ * @param[in] name The name of the counter + * @param[in] delta The amount by which to decrement the counter + */ +void ITTAPI __itt_counter_dec_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec_v3, (const __itt_domain *domain, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, counter_dec_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) +#define __itt_counter_dec_v3(d,x) ITTNOTIFY_VOID_D1(counter_dec_v3,d,x) +#define __itt_counter_dec_v3_ptr ITTNOTIFY_NAME(counter_dec_v3) +#define __itt_counter_dec_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_dec_delta_v3,d,x,y) +#define __itt_counter_dec_delta_v3_ptr ITTNOTIFY_NAME(counter_dec_delta_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec_v3(domain,name) +#define __itt_counter_dec_v3_ptr 0 +#define __itt_counter_dec_delta_v3(domain,name,delta) +#define __itt_counter_dec_delta_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_v3_ptr 0 +#define __itt_counter_dec_delta_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} counters group */ + + +/** + * @brief Set the counter value + */ +void ITTAPI __itt_counter_set_value(__itt_counter id, void *value_ptr); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value, (__itt_counter id, void *value_ptr)) +#define __itt_counter_set_value ITTNOTIFY_VOID(counter_set_value) +#define __itt_counter_set_value_ptr ITTNOTIFY_NAME(counter_set_value) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value(id, value_ptr) +#define __itt_counter_set_value_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Set the counter value + */ +void ITTAPI __itt_counter_set_value_ex(__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value_ex, (__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr)) +#define __itt_counter_set_value_ex ITTNOTIFY_VOID(counter_set_value_ex) +#define __itt_counter_set_value_ex_ptr ITTNOTIFY_NAME(counter_set_value_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) +#define __itt_counter_set_value_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Create a typed counter with given name/domain + * + * After __itt_counter_create_typed() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), + * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) + * can be used to change the value of the counter + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_create_typedA(const char *name, const char *domain, __itt_metadata_type type); +__itt_counter ITTAPI __itt_counter_create_typedW(const wchar_t *name, const wchar_t *domain, __itt_metadata_type type); +#if defined(UNICODE) || defined(_UNICODE) +# define 
__itt_counter_create_typed __itt_counter_create_typedW +# define __itt_counter_create_typed_ptr __itt_counter_create_typedW_ptr +#else /* UNICODE */ +# define __itt_counter_create_typed __itt_counter_create_typedA +# define __itt_counter_create_typed_ptr __itt_counter_create_typedA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI __itt_counter_create_typed(const char *name, const char *domain, __itt_metadata_type type); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedA, (const char *name, const char *domain, __itt_metadata_type type)) +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedW, (const wchar_t *name, const wchar_t *domain, __itt_metadata_type type)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create_typed, (const char *name, const char *domain, __itt_metadata_type type)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA ITTNOTIFY_DATA(counter_create_typedA) +#define __itt_counter_create_typedA_ptr ITTNOTIFY_NAME(counter_create_typedA) +#define __itt_counter_create_typedW ITTNOTIFY_DATA(counter_create_typedW) +#define __itt_counter_create_typedW_ptr ITTNOTIFY_NAME(counter_create_typedW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed ITTNOTIFY_DATA(counter_create_typed) +#define __itt_counter_create_typed_ptr ITTNOTIFY_NAME(counter_create_typed) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA(name, domain, type) +#define __itt_counter_create_typedA_ptr 0 +#define __itt_counter_create_typedW(name, domain, type) +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed(name, domain, type) +#define __itt_counter_create_typed_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA_ptr 0 +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create() or + * __itt_counter_create_typed() + */ +void ITTAPI __itt_counter_destroy(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id)) +#define __itt_counter_destroy ITTNOTIFY_VOID(counter_destroy) +#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_destroy(id) +#define __itt_counter_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} counters group */ + +/** + * @ingroup markers + * @brief Create a marker instance. + * @param[in] domain The domain for this marker + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
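/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of a typed counter whose value is written directly:
 * __itt_counter_create_typed() (narrow-string form) with __itt_metadata_double,
 * assumed to be one of the __itt_metadata_type values declared earlier, and
 * __itt_counter_set_value(), which takes a pointer to a value of that type.
 */
#include "ittnotify.h"

static void example_typed_counter(double cpu_load)
{
    static __itt_counter load_counter;  /* created lazily on first use */

    if (!load_counter)
        load_counter = __itt_counter_create_typed("cpu_load", "MyApp",
                                                  __itt_metadata_double);

    __itt_counter_set_value(load_counter, &cpu_load);
    /* __itt_counter_destroy(load_counter) would release the handle at shutdown. */
}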
+ * @param[in] id The instance ID for this marker, or __itt_null + * @param[in] name The name for this marker + * @param[in] scope The scope for this marker + */ +void ITTAPI __itt_marker_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope)) +#define __itt_marker_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(marker_ex,d,x,y,z,a,b) +#define __itt_marker_ex_ptr ITTNOTIFY_NAME(marker_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_marker_ex(domain,clock_domain,timestamp,id,name,scope) +#define __itt_marker_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_marker_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomain + * @brief Add a relation to the current task instance. + * The current task instance is the head of the relation. + * @param[in] domain The domain controlling this call + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_to_current_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail); + +/** + * @ingroup clockdomain + * @brief Add a relation between two instance identifiers. + * @param[in] domain The domain controlling this call + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
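/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of a timestamped marker plus a relation from the current
 * task to another instance. __itt_scope_global and
 * __itt_relation_is_dependent_on are assumed members of the __itt_scope and
 * __itt_relation enums declared earlier in this header; the string handle and
 * __itt_null are used as in the earlier examples, and clock() stands in for
 * the user clock behind cd.
 */
#include <time.h>
#include "ittnotify.h"

static void example_marker_and_relation(const __itt_domain* domain,
                                        __itt_clock_domain* cd,
                                        __itt_id producer)
{
    __itt_string_handle* name = __itt_string_handle_create("buffer_ready");

    __itt_marker_ex(domain, cd, (unsigned long long)clock(),
                    __itt_null, name, __itt_scope_global);
    __itt_relation_add_to_current_ex(domain, cd, (unsigned long long)clock(),
                                     __itt_relation_is_dependent_on, producer);
}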
+ * @param[in] head The ID for the head of the relation + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail)) +ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail)) +#define __itt_relation_add_to_current_ex(d,x,y,z,a) ITTNOTIFY_VOID_D4(relation_add_to_current_ex,d,x,y,z,a) +#define __itt_relation_add_to_current_ex_ptr ITTNOTIFY_NAME(relation_add_to_current_ex) +#define __itt_relation_add_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b) +#define __itt_relation_add_ex_ptr ITTNOTIFY_NAME(relation_add_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_relation_add_to_current_ex(domain,clock_domain,timestame,relation,tail) +#define __itt_relation_add_to_current_ex_ptr 0 +#define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail) +#define __itt_relation_add_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_relation_add_to_current_ex_ptr 0 +#define __itt_relation_add_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_documentation */ +typedef enum ___itt_track_group_type +{ + __itt_track_group_type_normal = 0 +} __itt_track_group_type; +/** @endcond */ + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_track_group +{ + __itt_string_handle* name; /*!< Name of the track group */ + struct ___itt_track* track; /*!< List of child tracks */ + __itt_track_group_type tgtype; /*!< Type of the track group */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_track_group* next; +} __itt_track_group; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Placeholder for custom track types. Currently, "normal" custom track + * is the only available track type. + */ +typedef enum ___itt_track_type +{ + __itt_track_type_normal = 0 +#ifdef INTEL_ITTNOTIFY_API_PRIVATE + , __itt_track_type_queue +#endif /* INTEL_ITTNOTIFY_API_PRIVATE */ +} __itt_track_type; + +/** @cond exclude_from_documentation */ +#pragma pack(push, 8) + +typedef struct ___itt_track +{ + __itt_string_handle* name; /*!< Name of the track group */ + __itt_track_group* group; /*!< Parent group to a track */ + __itt_track_type ttype; /*!< Type of the track */ + int extra1; /*!< Reserved. Must be zero */ + void* extra2; /*!< Reserved. Must be zero */ + struct ___itt_track* next; +} __itt_track; + +#pragma pack(pop) +/** @endcond */ + +/** + * @brief Create logical track group. 
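/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of building the track hierarchy described by the
 * structures above via __itt_track_group_create()/__itt_track_create()/
 * __itt_set_track(), declared just below. Passing NULL to return to the
 * default placement is an assumption, not something stated in this header.
 */
#include <stddef.h>
#include "ittnotify.h"

static void example_tracks(void)
{
    __itt_track_group* group =
        __itt_track_group_create(__itt_string_handle_create("IO queues"),
                                 __itt_track_group_type_normal);
    __itt_track* track =
        __itt_track_create(group, __itt_string_handle_create("queue 0"),
                           __itt_track_type_normal);

    __itt_set_track(track);  /* subsequent events are drawn on this track */
    /* ... emit tasks/markers here ... */
    __itt_set_track(NULL);   /* assumed reset to the default track */
}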
+ */ +__itt_track_group* ITTAPI __itt_track_group_create(__itt_string_handle* name, __itt_track_group_type track_group_type); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_track_group*, track_group_create, (__itt_string_handle* name, __itt_track_group_type track_group_type)) +#define __itt_track_group_create ITTNOTIFY_DATA(track_group_create) +#define __itt_track_group_create_ptr ITTNOTIFY_NAME(track_group_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_track_group_create(name) (__itt_track_group*)0 +#define __itt_track_group_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_track_group_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Create logical track. + */ +__itt_track* ITTAPI __itt_track_create(__itt_track_group* track_group, __itt_string_handle* name, __itt_track_type track_type); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_track*, track_create, (__itt_track_group* track_group,__itt_string_handle* name, __itt_track_type track_type)) +#define __itt_track_create ITTNOTIFY_DATA(track_create) +#define __itt_track_create_ptr ITTNOTIFY_NAME(track_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_track_create(track_group,name,track_type) (__itt_track*)0 +#define __itt_track_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_track_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Set the logical track. + */ +void ITTAPI __itt_set_track(__itt_track* track); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, set_track, (__itt_track *track)) +#define __itt_set_track ITTNOTIFY_VOID(set_track) +#define __itt_set_track_ptr ITTNOTIFY_NAME(set_track) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_set_track(track) +#define __itt_set_track_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_set_track_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/* ========================================================================== */ +/** @cond exclude_from_gpa_documentation */ +/** + * @defgroup events Events + * @ingroup public + * Events group + * @{ + */ +/** @brief user event type */ +typedef int __itt_event; + +/** + * @brief Create an event notification + * @note name or namelen being null/name and namelen not matching, user event feature not enabled + * @return non-zero event identifier upon success and __itt_err otherwise + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_event LIBITTAPI __itt_event_createA(const char *name, int namelen); +__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_event_create __itt_event_createW +# define __itt_event_create_ptr __itt_event_createW_ptr +#else +# define __itt_event_create __itt_event_createA +# define __itt_event_create_ptr __itt_event_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(LIBITTAPI, __itt_event, 
event_createA, (const char *name, int namelen)) +ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_event_createA ITTNOTIFY_DATA(event_createA) +#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA) +#define __itt_event_createW ITTNOTIFY_DATA(event_createW) +#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_event_create ITTNOTIFY_DATA(event_create) +#define __itt_event_create_ptr ITTNOTIFY_NAME(event_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_event_createA(name, namelen) (__itt_event)0 +#define __itt_event_createA_ptr 0 +#define __itt_event_createW(name, namelen) (__itt_event)0 +#define __itt_event_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_event_create(name, namelen) (__itt_event)0 +#define __itt_event_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_event_createA_ptr 0 +#define __itt_event_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_event_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an event occurrence. + * @return __itt_err upon failure (invalid event id/user event feature not enabled) + */ +int LIBITTAPI __itt_event_start(__itt_event event); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event)) +#define __itt_event_start ITTNOTIFY_DATA(event_start) +#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_event_start(event) (int)0 +#define __itt_event_start_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_event_start_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Record an event end occurrence. + * @note It is optional if events do not have durations. 
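/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of the legacy user-event API: create the event once
 * (narrow-string form assumed), then bracket each occurrence with
 * __itt_event_start()/__itt_event_end(); the latter is declared just below.
 */
#include <string.h>
#include "ittnotify.h"

static void example_user_event(void)
{
    static __itt_event frame_event;  /* 0 until successfully created */

    if (frame_event == 0)
        frame_event = __itt_event_create("frame", (int)strlen("frame"));

    if (frame_event != 0)
    {
        __itt_event_start(frame_event);
        /* ... one occurrence of the event ... */
        __itt_event_end(frame_event);
    }
}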
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled) + */ +int LIBITTAPI __itt_event_end(__itt_event event); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event)) +#define __itt_event_end ITTNOTIFY_DATA(event_end) +#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_event_end(event) (int)0 +#define __itt_event_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_event_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} events group */ + + +/** + * @defgroup arrays Arrays Visualizer + * @ingroup public + * Visualize arrays + * @{ + */ + +/** + * @enum __itt_av_data_type + * @brief Defines types of arrays data (for C/C++ intrinsic types) + */ +typedef enum +{ + __itt_e_first = 0, + __itt_e_char = 0, /* 1-byte integer */ + __itt_e_uchar, /* 1-byte unsigned integer */ + __itt_e_int16, /* 2-byte integer */ + __itt_e_uint16, /* 2-byte unsigned integer */ + __itt_e_int32, /* 4-byte integer */ + __itt_e_uint32, /* 4-byte unsigned integer */ + __itt_e_int64, /* 8-byte integer */ + __itt_e_uint64, /* 8-byte unsigned integer */ + __itt_e_float, /* 4-byte floating */ + __itt_e_double, /* 8-byte floating */ + __itt_e_last = __itt_e_double +} __itt_av_data_type; + +/** + * @brief Save an array data to a file. + * Output format is defined by the file extension. The csv and bmp formats are supported (bmp - for 2-dimensional array only). + * @param[in] data - pointer to the array data + * @param[in] rank - the rank of the array + * @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions. + * The size of dimensions must be equal to the rank + * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types) + * @param[in] filePath - the file path; the output format is defined by the file extension + * @param[in] columnOrder - defines how the array is stored in the linear memory. + * It should be 1 for column-major order (e.g. in FORTRAN) or 0 - for row-major order (e.g. in C). 
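/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of dumping a row-major 2-D array of doubles with
 * __itt_av_save(), declared just below. The .csv extension selects the output
 * format; on Windows builds with UNICODE the wide-string variant and a
 * wchar_t path would be expected instead.
 */
#include "ittnotify.h"

static int example_save_matrix(double* matrix, int rows, int cols)
{
    int dimensions[2];

    dimensions[0] = rows;
    dimensions[1] = cols;

    /* rank 2, element type __itt_e_double, row-major => columnOrder == 0 */
    return __itt_av_save(matrix, 2, dimensions, (int)__itt_e_double,
                         "matrix_dump.csv", 0);
}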
+ */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_av_saveA(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); +int ITTAPI __itt_av_saveW(void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_av_save __itt_av_saveW +# define __itt_av_save_ptr __itt_av_saveW_ptr +#else /* UNICODE */ +# define __itt_av_save __itt_av_saveA +# define __itt_av_save_ptr __itt_av_saveA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_av_save(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) +ITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA ITTNOTIFY_DATA(av_saveA) +#define __itt_av_saveA_ptr ITTNOTIFY_NAME(av_saveA) +#define __itt_av_saveW ITTNOTIFY_DATA(av_saveW) +#define __itt_av_saveW_ptr ITTNOTIFY_NAME(av_saveW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save ITTNOTIFY_DATA(av_save) +#define __itt_av_save_ptr ITTNOTIFY_NAME(av_save) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA(name) +#define __itt_av_saveA_ptr 0 +#define __itt_av_saveW(name) +#define __itt_av_saveW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save(name) +#define __itt_av_save_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA_ptr 0 +#define __itt_av_saveW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +void ITTAPI __itt_enable_attach(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, enable_attach, (void)) +#define __itt_enable_attach ITTNOTIFY_VOID(enable_attach) +#define __itt_enable_attach_ptr ITTNOTIFY_NAME(enable_attach) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_enable_attach() +#define __itt_enable_attach_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_enable_attach_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** @} arrays group */ + +/** @endcond */ + +/** + * @brief Module load info + * This API is used to report necessary information in case of module relocation + * @param[in] start_addr - relocated module start address + * @param[in] end_addr - relocated module end address + * @param[in] path - file system path to the module + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_module_loadA(void *start_addr, void *end_addr, const char *path); 
+void ITTAPI __itt_module_loadW(void *start_addr, void *end_addr, const wchar_t *path); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_module_load __itt_module_loadW +# define __itt_module_load_ptr __itt_module_loadW_ptr +#else /* UNICODE */ +# define __itt_module_load __itt_module_loadA +# define __itt_module_load_ptr __itt_module_loadA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_module_load(void *start_addr, void *end_addr, const char *path); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, void, module_loadA, (void *start_addr, void *end_addr, const char *path)) +ITT_STUB(ITTAPI, void, module_loadW, (void *start_addr, void *end_addr, const wchar_t *path)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, void, module_load, (void *start_addr, void *end_addr, const char *path)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA ITTNOTIFY_VOID(module_loadA) +#define __itt_module_loadA_ptr ITTNOTIFY_NAME(module_loadA) +#define __itt_module_loadW ITTNOTIFY_VOID(module_loadW) +#define __itt_module_loadW_ptr ITTNOTIFY_NAME(module_loadW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load ITTNOTIFY_VOID(module_load) +#define __itt_module_load_ptr ITTNOTIFY_NAME(module_load) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA(start_addr, end_addr, path) +#define __itt_module_loadA_ptr 0 +#define __itt_module_loadW(start_addr, end_addr, path) +#define __itt_module_loadW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load(start_addr, end_addr, path) +#define __itt_module_load_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA_ptr 0 +#define __itt_module_loadW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _ITTNOTIFY_H_ */ + +#ifdef INTEL_ITTNOTIFY_API_PRIVATE + +#ifndef _ITTNOTIFY_PRIVATE_ +#define _ITTNOTIFY_PRIVATE_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @ingroup clockdomain + * @brief Begin an overlapped task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null. + * @param[in] parentid The parent of this task, or __itt_null. + * @param[in] name The name of this task. + */ +void ITTAPI __itt_task_begin_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup clockdomain + * @brief End an overlapped task instance. + * @param[in] domain The domain for this task + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ * @param[in] taskid Explicit ID of finished task + */ +void ITTAPI __itt_task_end_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, task_begin_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name)) +ITT_STUBV(ITTAPI, void, task_end_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid)) +#define __itt_task_begin_overlapped_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_overlapped_ex,d,x,y,z,a,b) +#define __itt_task_begin_overlapped_ex_ptr ITTNOTIFY_NAME(task_begin_overlapped_ex) +#define __itt_task_end_overlapped_ex(d,x,y,z) ITTNOTIFY_VOID_D3(task_end_overlapped_ex,d,x,y,z) +#define __itt_task_end_overlapped_ex_ptr ITTNOTIFY_NAME(task_end_overlapped_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_task_begin_overlapped_ex(domain,clock_domain,timestamp,taskid,parentid,name) +#define __itt_task_begin_overlapped_ex_ptr 0 +#define __itt_task_end_overlapped_ex(domain,clock_domain,timestamp,taskid) +#define __itt_task_end_overlapped_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_task_begin_overlapped_ex_ptr 0 +#define __itt_task_end_overlapped_ptr 0 +#define __itt_task_end_overlapped_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @defgroup makrs_internal Marks + * @ingroup internal + * Marks group + * @warning Internal API: + * - It is not shipped to outside of Intel + * - It is delivered to internal Intel teams using e-mail or SVN access only + * @{ + */ +/** @brief user mark type */ +typedef int __itt_mark_type; + +/** + * @brief Creates a user mark type with the specified name using char or Unicode string. 
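/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of the overlapped-task calls above (part of the
 * INTEL_ITTNOTIFY_API_PRIVATE section): begin/end pairs may interleave, so
 * every instance carries an explicit non-null ID that the end call repeats.
 * __itt_id_make(), __itt_null and __itt_string_handle_create() are assumed
 * from earlier in this header; clock() stands in for the clock behind cd.
 */
#include <time.h>
#include "ittnotify.h"

static void example_overlapped_tasks(const __itt_domain* domain,
                                     __itt_clock_domain* cd,
                                     void* request_a, void* request_b)
{
    __itt_id id_a = __itt_id_make(request_a, 0);
    __itt_id id_b = __itt_id_make(request_b, 0);
    __itt_string_handle* name = __itt_string_handle_create("handle_request");

    __itt_task_begin_overlapped_ex(domain, cd, (unsigned long long)clock(),
                                   id_a, __itt_null, name);
    __itt_task_begin_overlapped_ex(domain, cd, (unsigned long long)clock(),
                                   id_b, __itt_null, name);
    /* the two instances overlap; the ends may arrive in either order */
    __itt_task_end_overlapped_ex(domain, cd, (unsigned long long)clock(), id_a);
    __itt_task_end_overlapped_ex(domain, cd, (unsigned long long)clock(), id_b);
}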
+ * @param[in] name - name of mark to create + * @return Returns a handle to the mark type + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_mark_type ITTAPI __itt_mark_createA(const char *name); +__itt_mark_type ITTAPI __itt_mark_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_mark_create __itt_mark_createW +# define __itt_mark_create_ptr __itt_mark_createW_ptr +#else /* UNICODE */ +# define __itt_mark_create __itt_mark_createA +# define __itt_mark_create_ptr __itt_mark_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_mark_type ITTAPI __itt_mark_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_mark_type, mark_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_createA ITTNOTIFY_DATA(mark_createA) +#define __itt_mark_createA_ptr ITTNOTIFY_NAME(mark_createA) +#define __itt_mark_createW ITTNOTIFY_DATA(mark_createW) +#define __itt_mark_createW_ptr ITTNOTIFY_NAME(mark_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_create ITTNOTIFY_DATA(mark_create) +#define __itt_mark_create_ptr ITTNOTIFY_NAME(mark_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_createA(name) (__itt_mark_type)0 +#define __itt_mark_createA_ptr 0 +#define __itt_mark_createW(name) (__itt_mark_type)0 +#define __itt_mark_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_create(name) (__itt_mark_type)0 +#define __itt_mark_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_createA_ptr 0 +#define __itt_mark_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Creates a "discrete" user mark type of the specified type and an optional parameter using char or Unicode string. + * + * - The mark of "discrete" type is placed to collection results in case of success. It appears in overtime view(s) as a special tick sign. + * - The call is "synchronous" - function returns after mark is actually added to results. + * - This function is useful, for example, to mark different phases of application + * (beginning of the next mark automatically meand end of current region). + * - Can be used together with "continuous" marks (see below) at the same collection session + * @param[in] mt - mark, created by __itt_mark_create(const char* name) function + * @param[in] parameter - string parameter of mark + * @return Returns zero value in case of success, non-zero value otherwise. 
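/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of "discrete" marks from this internal API: create the
 * mark type once (narrow-string form assumed), then call __itt_mark(),
 * declared just below, at each phase boundary; __itt_mark_global() would do
 * the same process-wide.
 */
#include "ittnotify.h"

static void example_discrete_mark(void)
{
    static __itt_mark_type phase_mark;  /* 0 until created */

    if (phase_mark == 0)
        phase_mark = __itt_mark_create("phase");

    __itt_mark(phase_mark, "initialization finished");
}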
+ */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_markA(__itt_mark_type mt, const char *parameter); +int ITTAPI __itt_markW(__itt_mark_type mt, const wchar_t *parameter); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_mark __itt_markW +# define __itt_mark_ptr __itt_markW_ptr +#else /* UNICODE */ +# define __itt_mark __itt_markA +# define __itt_mark_ptr __itt_markA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_mark(__itt_mark_type mt, const char *parameter); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char *parameter)) +ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, mark, (__itt_mark_type mt, const char *parameter)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_markA ITTNOTIFY_DATA(markA) +#define __itt_markA_ptr ITTNOTIFY_NAME(markA) +#define __itt_markW ITTNOTIFY_DATA(markW) +#define __itt_markW_ptr ITTNOTIFY_NAME(markW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark ITTNOTIFY_DATA(mark) +#define __itt_mark_ptr ITTNOTIFY_NAME(mark) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_markA(mt, parameter) (int)0 +#define __itt_markA_ptr 0 +#define __itt_markW(mt, parameter) (int)0 +#define __itt_markW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark(mt, parameter) (int)0 +#define __itt_mark_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_markA_ptr 0 +#define __itt_markW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Use this if necessary to create a "discrete" user event type (mark) for process + * rather then for one thread + * @see int __itt_mark(__itt_mark_type mt, const char* parameter); + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_mark_globalA(__itt_mark_type mt, const char *parameter); +int ITTAPI __itt_mark_globalW(__itt_mark_type mt, const wchar_t *parameter); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_mark_global __itt_mark_globalW +# define __itt_mark_global_ptr __itt_mark_globalW_ptr +#else /* UNICODE */ +# define __itt_mark_global __itt_mark_globalA +# define __itt_mark_global_ptr __itt_mark_globalA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_mark_global(__itt_mark_type mt, const char *parameter); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char *parameter)) +ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, mark_global, (__itt_mark_type mt, const char *parameter)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_globalA ITTNOTIFY_DATA(mark_globalA) +#define 
__itt_mark_globalA_ptr ITTNOTIFY_NAME(mark_globalA) +#define __itt_mark_globalW ITTNOTIFY_DATA(mark_globalW) +#define __itt_mark_globalW_ptr ITTNOTIFY_NAME(mark_globalW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_global ITTNOTIFY_DATA(mark_global) +#define __itt_mark_global_ptr ITTNOTIFY_NAME(mark_global) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_globalA(mt, parameter) (int)0 +#define __itt_mark_globalA_ptr 0 +#define __itt_mark_globalW(mt, parameter) (int)0 +#define __itt_mark_globalW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_global(mt, parameter) (int)0 +#define __itt_mark_global_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_mark_globalA_ptr 0 +#define __itt_mark_globalW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_mark_global_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Creates an "end" point for "continuous" mark with specified name. + * + * - Returns zero value in case of success, non-zero value otherwise. + * Also returns non-zero value when preceding "begin" point for the + * mark with the same name failed to be created or not created. + * - The mark of "continuous" type is placed to collection results in + * case of success. It appears in overtime view(s) as a special tick + * sign (different from "discrete" mark) together with line from + * corresponding "begin" mark to "end" mark. + * @note Continuous marks can overlap and be nested inside each other. + * Discrete mark can be nested inside marked region + * @param[in] mt - mark, created by __itt_mark_create(const char* name) function + * @return Returns zero value in case of success, non-zero value otherwise. 
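/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of a "continuous" mark region, assuming (per the text
 * above) that a preceding __itt_mark() call on the same mark type provides
 * the "begin" point and __itt_mark_off(), declared just below, provides the
 * matching "end" point.
 */
#include "ittnotify.h"

static void example_continuous_mark(__itt_mark_type io_mark)
{
    __itt_mark(io_mark, "reading configuration");  /* assumed "begin" point   */
    /* ... region highlighted between begin and end ... */
    __itt_mark_off(io_mark);                       /* "end" point for io_mark */
}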
+ */ +int ITTAPI __itt_mark_off(__itt_mark_type mt); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt)) +#define __itt_mark_off ITTNOTIFY_DATA(mark_off) +#define __itt_mark_off_ptr ITTNOTIFY_NAME(mark_off) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_mark_off(mt) (int)0 +#define __itt_mark_off_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_mark_off_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Use this if necessary to create an "end" point for mark of process + * @see int __itt_mark_off(__itt_mark_type mt); + */ +int ITTAPI __itt_mark_global_off(__itt_mark_type mt); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt)) +#define __itt_mark_global_off ITTNOTIFY_DATA(mark_global_off) +#define __itt_mark_global_off_ptr ITTNOTIFY_NAME(mark_global_off) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_mark_global_off(mt) (int)0 +#define __itt_mark_global_off_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_mark_global_off_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} marks group */ + +/** + * @defgroup counters_internal Counters + * @ingroup internal + * Counters group + * @{ + */ + + +/** + * @defgroup stitch Stack Stitching + * @ingroup internal + * Stack Stitching group + * @{ + */ +/** + * @brief opaque structure for counter identification + */ +typedef struct ___itt_caller *__itt_caller; + +/** + * @brief Create the stitch point e.g. a point in call stack where other stacks should be stitched to. + * The function returns a unique identifier which is used to match the cut points with corresponding stitch points. + */ +__itt_caller ITTAPI __itt_stack_caller_create(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void)) +#define __itt_stack_caller_create ITTNOTIFY_DATA(stack_caller_create) +#define __itt_stack_caller_create_ptr ITTNOTIFY_NAME(stack_caller_create) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_caller_create() (__itt_caller)0 +#define __itt_stack_caller_create_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_caller_create_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Destroy the inforamtion about stitch point identified by the pointer previously returned by __itt_stack_caller_create() + */ +void ITTAPI __itt_stack_caller_destroy(__itt_caller id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id)) +#define __itt_stack_caller_destroy ITTNOTIFY_VOID(stack_caller_destroy) +#define __itt_stack_caller_destroy_ptr ITTNOTIFY_NAME(stack_caller_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_caller_destroy(id) +#define __itt_stack_caller_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_caller_destroy_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Sets the cut point. 
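/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of stack stitching (private API) in a hypothetical task
 * scheduler: the stitch point is captured where work is submitted, and the
 * worker brackets execution with __itt_stack_callee_enter()/
 * __itt_stack_callee_leave(), declared just below, so sampled stacks are
 * stitched back to the submitter.
 */
#include "ittnotify.h"

typedef struct example_work_item {  /* hypothetical scheduler work item */
    __itt_caller stitch;
    void (*run)(void*);
    void* arg;
} example_work_item;

static void example_submit(example_work_item* item)
{
    item->stitch = __itt_stack_caller_create();  /* remember the submitting stack */
}

static void example_execute(example_work_item* item)
{
    __itt_stack_callee_enter(item->stitch);      /* cut here, stitch to submitter */
    item->run(item->arg);
    __itt_stack_callee_leave(item->stitch);
    __itt_stack_caller_destroy(item->stitch);    /* stitch point no longer needed */
}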
Stack from each event which occurs after this call will be cut + * at the same stack level the function was called and stitched to the corresponding stitch point. + */ +void ITTAPI __itt_stack_callee_enter(__itt_caller id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id)) +#define __itt_stack_callee_enter ITTNOTIFY_VOID(stack_callee_enter) +#define __itt_stack_callee_enter_ptr ITTNOTIFY_NAME(stack_callee_enter) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_callee_enter(id) +#define __itt_stack_callee_enter_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_callee_enter_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief This function eliminates the cut point which was set by latest __itt_stack_callee_enter(). + */ +void ITTAPI __itt_stack_callee_leave(__itt_caller id); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id)) +#define __itt_stack_callee_leave ITTNOTIFY_VOID(stack_callee_leave) +#define __itt_stack_callee_leave_ptr ITTNOTIFY_NAME(stack_callee_leave) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_stack_callee_leave(id) +#define __itt_stack_callee_leave_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_stack_callee_leave_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} stitch group */ + +/* ***************************************************************************************************************************** */ + +#include <stdarg.h> + +/** @cond exclude_from_documentation */ +typedef enum __itt_error_code +{ + __itt_error_success = 0, /*!< no error */ + __itt_error_no_module = 1, /*!< module can't be loaded */ + /* %1$s -- library name; win: %2$d -- system error code; unx: %2$s -- system error message. */ + __itt_error_no_symbol = 2, /*!< symbol not found */ + /* %1$s -- library name, %2$s -- symbol name. */ + __itt_error_unknown_group = 3, /*!< unknown group specified */ + /* %1$s -- env var name, %2$s -- group name. */ + __itt_error_cant_read_env = 4, /*!< GetEnvironmentVariable() failed */ + /* %1$s -- env var name, %2$d -- system error. */ + __itt_error_env_too_long = 5, /*!< variable value too long */ + /* %1$s -- env var name, %2$d -- actual length of the var, %3$d -- max allowed length. */ + __itt_error_system = 6 /*!< pthread_mutexattr_init or pthread_mutex_init failed */ + /* %1$s -- function name, %2$d -- errno. 
*/
+} __itt_error_code;
+
+typedef void (__itt_error_handler_t)(__itt_error_code code, va_list);
+__itt_error_handler_t* __itt_set_error_handler(__itt_error_handler_t*);
+
+const char* ITTAPI __itt_api_version(void);
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#define __itt_error_handler ITT_JOIN(INTEL_ITTNOTIFY_PREFIX, error_handler)
+void __itt_error_handler(__itt_error_code code, va_list args);
+extern const int ITTNOTIFY_NAME(err);
+#define __itt_err ITTNOTIFY_NAME(err)
+ITT_STUB(ITTAPI, const char*, api_version, (void))
+#define __itt_api_version ITTNOTIFY_DATA(api_version)
+#define __itt_api_version_ptr ITTNOTIFY_NAME(api_version)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_api_version() (const char*)0
+#define __itt_api_version_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_api_version_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _ITTNOTIFY_PRIVATE_ */
+
+#endif /* INTEL_ITTNOTIFY_API_PRIVATE */
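/*
 * [Editorial illustration; not part of the original header or commit.]
 * A user-side sketch of the private error-handler hook and version query
 * declared above (INTEL_ITTNOTIFY_API_PRIVATE must be defined before
 * including the header). Only the numeric code is printed because the
 * va_list contents differ per __itt_error_code value; see the enum comments.
 */
#include <stdarg.h>
#include <stdio.h>
#include "ittnotify.h"

static void example_error_handler(__itt_error_code code, va_list args)
{
    (void)args;  /* format arguments depend on the code value */
    fprintf(stderr, "ittnotify error: %d\n", (int)code);
}

static void example_install_error_handler(void)
{
    const char* version;

    __itt_set_error_handler(example_error_handler);

    version = __itt_api_version();
    if (version != 0)
        fprintf(stderr, "ITT API version: %s\n", version);
}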