From 6bf0a5cb5034a7e684dcc3500e841785237ce2dd Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 19:32:43 +0200 Subject: Adding upstream version 1:115.7.0. Signed-off-by: Daniel Baumann --- tools/profiler/public/ChildProfilerController.h | 71 ++ tools/profiler/public/GeckoProfiler.h | 435 ++++++++ tools/profiler/public/GeckoProfilerReporter.h | 26 + tools/profiler/public/GeckoTraceEvent.h | 1060 ++++++++++++++++++++ tools/profiler/public/MicroGeckoProfiler.h | 130 +++ .../profiler/public/ProfileAdditionalInformation.h | 90 ++ ...rofileBufferEntrySerializationGeckoExtensions.h | 160 +++ tools/profiler/public/ProfileJSONWriter.h | 19 + tools/profiler/public/ProfilerBindings.h | 162 +++ tools/profiler/public/ProfilerChild.h | 106 ++ tools/profiler/public/ProfilerCodeAddressService.h | 52 + tools/profiler/public/ProfilerControl.h | 190 ++++ tools/profiler/public/ProfilerCounts.h | 296 ++++++ tools/profiler/public/ProfilerLabels.h | 268 +++++ tools/profiler/public/ProfilerMarkerTypes.h | 41 + tools/profiler/public/ProfilerMarkers.h | 355 +++++++ tools/profiler/public/ProfilerMarkersDetail.h | 31 + .../profiler/public/ProfilerMarkersPrerequisites.h | 31 + tools/profiler/public/ProfilerParent.h | 119 +++ tools/profiler/public/ProfilerRunnable.h | 68 ++ tools/profiler/public/ProfilerRustBindings.h | 12 + tools/profiler/public/ProfilerState.h | 399 ++++++++ tools/profiler/public/ProfilerThreadPlatformData.h | 80 ++ tools/profiler/public/ProfilerThreadRegistration.h | 367 +++++++ .../public/ProfilerThreadRegistrationData.h | 537 ++++++++++ .../public/ProfilerThreadRegistrationInfo.h | 64 ++ tools/profiler/public/ProfilerThreadRegistry.h | 321 ++++++ tools/profiler/public/ProfilerThreadSleep.h | 58 ++ tools/profiler/public/ProfilerThreadState.h | 128 +++ tools/profiler/public/ProfilerUtils.h | 32 + tools/profiler/public/shared-libraries.h | 213 ++++ 31 files changed, 5921 insertions(+) create mode 100644 tools/profiler/public/ChildProfilerController.h create mode 100644 tools/profiler/public/GeckoProfiler.h create mode 100644 tools/profiler/public/GeckoProfilerReporter.h create mode 100644 tools/profiler/public/GeckoTraceEvent.h create mode 100644 tools/profiler/public/MicroGeckoProfiler.h create mode 100644 tools/profiler/public/ProfileAdditionalInformation.h create mode 100644 tools/profiler/public/ProfileBufferEntrySerializationGeckoExtensions.h create mode 100644 tools/profiler/public/ProfileJSONWriter.h create mode 100644 tools/profiler/public/ProfilerBindings.h create mode 100644 tools/profiler/public/ProfilerChild.h create mode 100644 tools/profiler/public/ProfilerCodeAddressService.h create mode 100644 tools/profiler/public/ProfilerControl.h create mode 100644 tools/profiler/public/ProfilerCounts.h create mode 100644 tools/profiler/public/ProfilerLabels.h create mode 100644 tools/profiler/public/ProfilerMarkerTypes.h create mode 100644 tools/profiler/public/ProfilerMarkers.h create mode 100644 tools/profiler/public/ProfilerMarkersDetail.h create mode 100644 tools/profiler/public/ProfilerMarkersPrerequisites.h create mode 100644 tools/profiler/public/ProfilerParent.h create mode 100644 tools/profiler/public/ProfilerRunnable.h create mode 100644 tools/profiler/public/ProfilerRustBindings.h create mode 100644 tools/profiler/public/ProfilerState.h create mode 100644 tools/profiler/public/ProfilerThreadPlatformData.h create mode 100644 tools/profiler/public/ProfilerThreadRegistration.h create mode 100644 tools/profiler/public/ProfilerThreadRegistrationData.h create mode 100644 
tools/profiler/public/ProfilerThreadRegistrationInfo.h create mode 100644 tools/profiler/public/ProfilerThreadRegistry.h create mode 100644 tools/profiler/public/ProfilerThreadSleep.h create mode 100644 tools/profiler/public/ProfilerThreadState.h create mode 100644 tools/profiler/public/ProfilerUtils.h create mode 100644 tools/profiler/public/shared-libraries.h (limited to 'tools/profiler/public') diff --git a/tools/profiler/public/ChildProfilerController.h b/tools/profiler/public/ChildProfilerController.h new file mode 100644 index 0000000000..8febc25b65 --- /dev/null +++ b/tools/profiler/public/ChildProfilerController.h @@ -0,0 +1,71 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ChildProfilerController_h +#define ChildProfilerController_h + +#include "base/process.h" +#include "mozilla/Attributes.h" +#include "mozilla/ipc/ProtocolUtils.h" +#include "mozilla/DataMutex.h" +#include "mozilla/RefPtr.h" +#include "nsISupportsImpl.h" +#include "nsStringFwd.h" +#include "ProfileAdditionalInformation.h" + +namespace mozilla { + +class ProfilerChild; +class PProfilerChild; +class PProfilerParent; + +// ChildProfilerController manages the setup and teardown of ProfilerChild. +// It's used on the main thread. +// It manages a background thread that ProfilerChild runs on. +class ChildProfilerController final { + public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChildProfilerController) + +#ifdef MOZ_GECKO_PROFILER + static already_AddRefed Create( + mozilla::ipc::Endpoint&& aEndpoint); + + [[nodiscard]] ProfileAndAdditionalInformation + GrabShutdownProfileAndShutdown(); + void Shutdown(); + + private: + ChildProfilerController(); + ~ChildProfilerController(); + void Init(mozilla::ipc::Endpoint&& aEndpoint); + void ShutdownAndMaybeGrabShutdownProfileFirst( + ProfileAndAdditionalInformation* aOutShutdownProfileInformation); + + // Called on mThread: + void SetupProfilerChild(mozilla::ipc::Endpoint&& aEndpoint); + void ShutdownProfilerChild( + ProfileAndAdditionalInformation* aOutShutdownProfileInformation); + + RefPtr mProfilerChild; // only accessed on mThread + DataMutex> mThread; +#else + static already_AddRefed Create( + mozilla::ipc::Endpoint&& aEndpoint) { + return nullptr; + } + [[nodiscard]] ProfileAndAdditionalInformation + GrabShutdownProfileAndShutdown() { + return ProfileAndAdditionalInformation(std::move(EmptyCString())); + } + void Shutdown() {} + + private: + ~ChildProfilerController() {} +#endif // MOZ_GECKO_PROFILER +}; + +} // namespace mozilla + +#endif // ChildProfilerController_h diff --git a/tools/profiler/public/GeckoProfiler.h b/tools/profiler/public/GeckoProfiler.h new file mode 100644 index 0000000000..f7c045297e --- /dev/null +++ b/tools/profiler/public/GeckoProfiler.h @@ -0,0 +1,435 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// The Gecko Profiler is an always-on profiler that takes fast and low overhead +// samples of the program execution using only userspace functionality for +// portability. 
The goal of this module is to provide performance data in a +// generic cross-platform way without requiring custom tools or kernel support. +// +// Samples are collected to form a timeline with optional timeline event +// (markers) used for filtering. The samples include both native stacks and +// platform-independent "label stack" frames. + +#ifndef GeckoProfiler_h +#define GeckoProfiler_h + +// Everything in here is also safe to include unconditionally, and only defines +// empty macros if MOZ_GECKO_PROFILER is unset. +// If your file only uses particular APIs (e.g., only markers), please consider +// including only the needed headers instead of this one, to reduce compilation +// dependencies. +#include "BaseProfiler.h" +#include "ProfileAdditionalInformation.h" +#include "mozilla/ProfilerCounts.h" +#include "mozilla/ProfilerLabels.h" +#include "mozilla/ProfilerMarkers.h" +#include "mozilla/ProfilerState.h" +#include "mozilla/ProfilerThreadSleep.h" +#include "mozilla/ProfilerThreadState.h" +#include "mozilla/ProgressLogger.h" +#include "mozilla/Result.h" +#include "mozilla/ResultVariant.h" + +#ifndef MOZ_GECKO_PROFILER + +# include "mozilla/UniquePtr.h" + +// This file can be #included unconditionally. However, everything within this +// file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the +// following macros and functions, which encapsulate the most common operations +// and thus avoid the need for many #ifdefs. + +# define PROFILER_REGISTER_THREAD(name) +# define PROFILER_UNREGISTER_THREAD() +# define AUTO_PROFILER_REGISTER_THREAD(name) + +# define PROFILER_JS_INTERRUPT_CALLBACK() + +# define PROFILER_SET_JS_CONTEXT(cx) +# define PROFILER_CLEAR_JS_CONTEXT() + +// Function stubs for when MOZ_GECKO_PROFILER is not defined. + +// This won't be used, it's just there to allow the empty definition of +// `profiler_get_backtrace`. +struct ProfilerBacktrace {}; +using UniqueProfilerBacktrace = mozilla::UniquePtr; + +// Get/Capture-backtrace functions can return nullptr or false, the result +// should be fed to another empty macro or stub anyway. + +static inline UniqueProfilerBacktrace profiler_get_backtrace() { + return nullptr; +} + +// This won't be used, it's just there to allow the empty definitions of +// `profiler_capture_backtrace_into` and `profiler_capture_backtrace`. 
+struct ProfileChunkedBuffer {}; + +static inline bool profiler_capture_backtrace_into( + mozilla::ProfileChunkedBuffer& aChunkedBuffer, + mozilla::StackCaptureOptions aCaptureOptions) { + return false; +} +static inline mozilla::UniquePtr +profiler_capture_backtrace() { + return nullptr; +} + +static inline void profiler_set_process_name( + const nsACString& aProcessName, const nsACString* aETLDplus1 = nullptr) {} + +static inline void profiler_received_exit_profile( + const nsACString& aExitProfile) {} + +static inline void profiler_register_page(uint64_t aTabID, + uint64_t aInnerWindowID, + const nsCString& aUrl, + uint64_t aEmbedderInnerWindowID, + bool aIsPrivateBrowsing) {} +static inline void profiler_unregister_page(uint64_t aRegisteredInnerWindowID) { +} + +static inline void GetProfilerEnvVarsForChildProcess( + std::function&& aSetEnv) {} + +static inline void profiler_record_wakeup_count( + const nsACString& aProcessType) {} + +#else // !MOZ_GECKO_PROFILER + +# include "js/ProfilingStack.h" +# include "mozilla/Assertions.h" +# include "mozilla/Atomics.h" +# include "mozilla/Attributes.h" +# include "mozilla/BaseProfilerRAIIMacro.h" +# include "mozilla/Maybe.h" +# include "mozilla/PowerOfTwo.h" +# include "mozilla/ThreadLocal.h" +# include "mozilla/TimeStamp.h" +# include "mozilla/UniquePtr.h" +# include "nscore.h" +# include "nsINamed.h" +# include "nsString.h" +# include "nsThreadUtils.h" + +# include +# include + +class ProfilerBacktrace; +class ProfilerCodeAddressService; +struct JSContext; + +namespace mozilla { +class ProfileBufferControlledChunkManager; +class ProfileChunkedBuffer; +namespace baseprofiler { +class SpliceableJSONWriter; +} // namespace baseprofiler +} // namespace mozilla +class nsIURI; + +enum class ProfilerError { + IsInactive, + JsonGenerationFailed, +}; + +template +using ProfilerResult = mozilla::Result; + +//--------------------------------------------------------------------------- +// Give information to the profiler +//--------------------------------------------------------------------------- + +// Register/unregister threads with the profiler. Both functions operate the +// same whether the profiler is active or inactive. +# define PROFILER_REGISTER_THREAD(name) \ + do { \ + char stackTop; \ + profiler_register_thread(name, &stackTop); \ + } while (0) +# define PROFILER_UNREGISTER_THREAD() profiler_unregister_thread() +ProfilingStack* profiler_register_thread(const char* name, void* guessStackTop); +void profiler_unregister_thread(); + +// Registers a DOM Window (the JS global `window`) with the profiler. Each +// Window _roughly_ corresponds to a single document loaded within a +// browsing context. Both the Window Id and Browser Id are recorded to allow +// correlating different Windows loaded within the same tab or frame element. +// +// We register pages for each navigations but we do not register +// history.pushState or history.replaceState since they correspond to the same +// Inner Window ID. When a browsing context is first loaded, the first url +// loaded in it will be about:blank. Because of that, this call keeps the first +// non-about:blank registration of window and discards the previous one. +// +// "aTabID" is the BrowserId of that document belongs to. +// That's used to determine the tab of that page. +// "aInnerWindowID" is the ID of the `window` global object of that +// document. +// "aUrl" is the URL of the page. +// "aEmbedderInnerWindowID" is the inner window id of embedder. 
It's used to +// determine sub documents of a page. +// "aIsPrivateBrowsing" is true if this browsing context happens in a +// private browsing context. +void profiler_register_page(uint64_t aTabID, uint64_t aInnerWindowID, + const nsCString& aUrl, + uint64_t aEmbedderInnerWindowID, + bool aIsPrivateBrowsing); +// Unregister page with the profiler. +// +// Take a Inner Window ID and unregister the page entry that has the same ID. +void profiler_unregister_page(uint64_t aRegisteredInnerWindowID); + +// Remove all registered and unregistered pages in the profiler. +void profiler_clear_all_pages(); + +class BaseProfilerCount; +void profiler_add_sampled_counter(BaseProfilerCount* aCounter); +void profiler_remove_sampled_counter(BaseProfilerCount* aCounter); + +// Register and unregister a thread within a scope. +# define AUTO_PROFILER_REGISTER_THREAD(name) \ + mozilla::AutoProfilerRegisterThread PROFILER_RAII(name) + +enum class SamplingState { + JustStopped, // Sampling loop has just stopped without sampling, between the + // callback registration and now. + SamplingPaused, // Profiler is active but sampling loop has gone through a + // pause. + NoStackSamplingCompleted, // A full sampling loop has completed in + // no-stack-sampling mode. + SamplingCompleted // A full sampling loop has completed. +}; + +using PostSamplingCallback = std::function; + +// Install a callback to be invoked at the end of the next sampling loop. +// - `false` if profiler is not active, `aCallback` will stay untouched. +// - `true` if `aCallback` was successfully moved-from into internal storage, +// and *will* be invoked at the end of the next sampling cycle. Note that this +// will happen on the Sampler thread, and will block further sampling, so +// please be mindful not to block for a long time (e.g., just dispatch a +// runnable to another thread.) Calling profiler functions from the callback +// is allowed. +[[nodiscard]] bool profiler_callback_after_sampling( + PostSamplingCallback&& aCallback); + +// Called by the JSRuntime's operation callback. This is used to start profiling +// on auxiliary threads. Operates the same whether the profiler is active or +// not. +# define PROFILER_JS_INTERRUPT_CALLBACK() profiler_js_interrupt_callback() +void profiler_js_interrupt_callback(); + +// Set and clear the current thread's JSContext. +# define PROFILER_SET_JS_CONTEXT(cx) profiler_set_js_context(cx) +# define PROFILER_CLEAR_JS_CONTEXT() profiler_clear_js_context() +void profiler_set_js_context(JSContext* aCx); +void profiler_clear_js_context(); + +//--------------------------------------------------------------------------- +// Get information from the profiler +//--------------------------------------------------------------------------- + +// Get the chunk manager used in the current profiling session, or null. +mozilla::ProfileBufferControlledChunkManager* +profiler_get_controlled_chunk_manager(); + +// The number of milliseconds since the process started. Operates the same +// whether the profiler is active or inactive. +double profiler_time(); + +// An object of this class is passed to profiler_suspend_and_sample_thread(). +// For each stack frame, one of the Collect methods will be called. +class ProfilerStackCollector { + public: + // Some collectors need to worry about possibly overwriting previous + // generations of data. If that's not an issue, this can return Nothing, + // which is the default behaviour. 
+ virtual mozilla::Maybe SamplePositionInBuffer() { + return mozilla::Nothing(); + } + virtual mozilla::Maybe BufferRangeStart() { + return mozilla::Nothing(); + } + + // This method will be called once if the thread being suspended is the main + // thread. Default behaviour is to do nothing. + virtual void SetIsMainThread() {} + + // WARNING: The target thread is suspended when the Collect methods are + // called. Do not try to allocate or acquire any locks, or you could + // deadlock. The target thread will have resumed by the time this function + // returns. + + virtual void CollectNativeLeafAddr(void* aAddr) = 0; + + virtual void CollectJitReturnAddr(void* aAddr) = 0; + + virtual void CollectWasmFrame(const char* aLabel) = 0; + + virtual void CollectProfilingStackFrame( + const js::ProfilingStackFrame& aFrame) = 0; +}; + +// This method suspends the thread identified by aThreadId, samples its +// profiling stack, JS stack, and (optionally) native stack, passing the +// collected frames into aCollector. aFeatures dictates which compiler features +// are used. |Leaf| is the only relevant one. +// Use `ProfilerThreadId{}` (unspecified) to sample the current thread. +void profiler_suspend_and_sample_thread(ProfilerThreadId aThreadId, + uint32_t aFeatures, + ProfilerStackCollector& aCollector, + bool aSampleNative = true); + +struct ProfilerBacktraceDestructor { + void operator()(ProfilerBacktrace*); +}; + +using UniqueProfilerBacktrace = + mozilla::UniquePtr; + +// Immediately capture the current thread's call stack, store it in the provided +// buffer (usually to avoid allocations if you can construct the buffer on the +// stack). Returns false if unsuccessful, or if the profiler is inactive. +bool profiler_capture_backtrace_into( + mozilla::ProfileChunkedBuffer& aChunkedBuffer, + mozilla::StackCaptureOptions aCaptureOptions); + +// Immediately capture the current thread's call stack, and return it in a +// ProfileChunkedBuffer (usually for later use in MarkerStack::TakeBacktrace()). +// May be null if unsuccessful, or if the profiler is inactive. +mozilla::UniquePtr profiler_capture_backtrace(); + +// Immediately capture the current thread's call stack, and return it in a +// ProfilerBacktrace (usually for later use in marker function that take a +// ProfilerBacktrace). May be null if unsuccessful, or if the profiler is +// inactive. +UniqueProfilerBacktrace profiler_get_backtrace(); + +struct ProfilerStats { + unsigned n = 0; + double sum = 0; + double min = std::numeric_limits::max(); + double max = 0; + void Count(double v) { + ++n; + sum += v; + if (v < min) { + min = v; + } + if (v > max) { + max = v; + } + } +}; + +struct ProfilerBufferInfo { + // Index of the oldest entry. + uint64_t mRangeStart; + // Index of the newest entry. + uint64_t mRangeEnd; + // Buffer capacity in number of 8-byte entries. + uint32_t mEntryCount; + // Sampling stats: Interval between successive samplings. + ProfilerStats mIntervalsUs; + // Sampling stats: Total sampling duration. (Split detail below.) + ProfilerStats mOverheadsUs; + // Sampling stats: Time to acquire the lock before sampling. + ProfilerStats mLockingsUs; + // Sampling stats: Time to discard expired data. + ProfilerStats mCleaningsUs; + // Sampling stats: Time to collect counter data. + ProfilerStats mCountersUs; + // Sampling stats: Time to sample thread stacks. + ProfilerStats mThreadsUs; +}; + +// Get information about the current buffer status. +// Returns Nothing() if the profiler is inactive. 
+// +// This information may be useful to a user-interface displaying the current +// status of the profiler, allowing the user to get a sense for how fast the +// buffer is being written to, and how much data is visible. +mozilla::Maybe profiler_get_buffer_info(); + +// Record through glean how many times profiler_thread_wake has been +// called. +void profiler_record_wakeup_count(const nsACString& aProcessType); + +//--------------------------------------------------------------------------- +// Output profiles +//--------------------------------------------------------------------------- + +// Set a user-friendly process name, used in JSON stream. Allows an optional +// detailed name which may include private info (eTLD+1 in fission) +void profiler_set_process_name(const nsACString& aProcessName, + const nsACString* aETLDplus1 = nullptr); + +// Record an exit profile from a child process. +void profiler_received_exit_profile(const nsACString& aExitProfile); + +// Get the profile encoded as a JSON string. A no-op (returning nullptr) if the +// profiler is inactive. +// If aIsShuttingDown is true, the current time is included as the process +// shutdown time in the JSON's "meta" object. +mozilla::UniquePtr profiler_get_profile(double aSinceTime = 0, + bool aIsShuttingDown = false); + +// Write the profile for this process (excluding subprocesses) into aWriter. +// Returns a failed result if the profiler is inactive. +ProfilerResult +profiler_stream_json_for_this_process( + mozilla::baseprofiler::SpliceableJSONWriter& aWriter, double aSinceTime = 0, + bool aIsShuttingDown = false, + ProfilerCodeAddressService* aService = nullptr, + mozilla::ProgressLogger aProgressLogger = {}); + +// Get the profile and write it into a file. A no-op if the profile is +// inactive. +// +// This function is 'extern "C"' so that it is easily callable from a debugger +// in a build without debugging information (a workaround for +// http://llvm.org/bugs/show_bug.cgi?id=22211). +extern "C" { +void profiler_save_profile_to_file(const char* aFilename); +} + +//--------------------------------------------------------------------------- +// RAII classes +//--------------------------------------------------------------------------- + +namespace mozilla { + +// Convenience class to register and unregister a thread with the profiler. +// Needs to be the first object on the stack of the thread. +class MOZ_RAII AutoProfilerRegisterThread final { + public: + explicit AutoProfilerRegisterThread(const char* aName) { + profiler_register_thread(aName, this); + } + + ~AutoProfilerRegisterThread() { profiler_unregister_thread(); } + + private: + AutoProfilerRegisterThread(const AutoProfilerRegisterThread&) = delete; + AutoProfilerRegisterThread& operator=(const AutoProfilerRegisterThread&) = + delete; +}; + +// Get the MOZ_PROFILER_STARTUP* environment variables that should be +// supplied to a child process that is about to be launched, in order +// to make that child process start with the same profiler settings as +// in the current process. The given function is invoked once for +// each variable to be set. 
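// A minimal usage sketch, assuming MOZ_GECKO_PROFILER is enabled and that the
// callback receives the variable name and value as C strings; the environment
// container below is only a placeholder for whatever launcher-side structure
// the embedder uses:
//
//   std::vector<std::pair<std::string, std::string>> childEnvironment;
//   mozilla::GetProfilerEnvVarsForChildProcess(
//       [&](const char* aName, const char* aValue) {
//         // Invoked once per MOZ_PROFILER_STARTUP* variable to forward.
//         childEnvironment.emplace_back(aName, aValue);
//       });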
+void GetProfilerEnvVarsForChildProcess( + std::function&& aSetEnv); + +} // namespace mozilla + +#endif // !MOZ_GECKO_PROFILER + +#endif // GeckoProfiler_h diff --git a/tools/profiler/public/GeckoProfilerReporter.h b/tools/profiler/public/GeckoProfilerReporter.h new file mode 100644 index 0000000000..f5bf41f223 --- /dev/null +++ b/tools/profiler/public/GeckoProfilerReporter.h @@ -0,0 +1,26 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef GeckoProfilerReporter_h +#define GeckoProfilerReporter_h + +#include "nsIMemoryReporter.h" + +class GeckoProfilerReporter final : public nsIMemoryReporter { + public: + NS_DECL_ISUPPORTS + + GeckoProfilerReporter() {} + + NS_IMETHOD + CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData, + bool aAnonymize) override; + + private: + ~GeckoProfilerReporter() {} +}; + +#endif diff --git a/tools/profiler/public/GeckoTraceEvent.h b/tools/profiler/public/GeckoTraceEvent.h new file mode 100644 index 0000000000..75affaf9c8 --- /dev/null +++ b/tools/profiler/public/GeckoTraceEvent.h @@ -0,0 +1,1060 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file under third_party_mods/chromium or at: +// http://src.chromium.org/svn/trunk/src/LICENSE + +#ifndef GECKO_TRACE_EVENT_H_ +#define GECKO_TRACE_EVENT_H_ + +#include "MicroGeckoProfiler.h" + +// Extracted from Chromium's src/base/debug/trace_event.h, modified to talk to +// the Gecko profiler. + +#if defined(RTC_DISABLE_TRACE_EVENTS) +# define RTC_TRACE_EVENTS_ENABLED 0 +#else +# define RTC_TRACE_EVENTS_ENABLED 1 +#endif + +// Type values for identifying types in the TraceValue union. +#define TRACE_VALUE_TYPE_BOOL (static_cast(1)) +#define TRACE_VALUE_TYPE_UINT (static_cast(2)) +#define TRACE_VALUE_TYPE_INT (static_cast(3)) +#define TRACE_VALUE_TYPE_DOUBLE (static_cast(4)) +#define TRACE_VALUE_TYPE_POINTER (static_cast(5)) +#define TRACE_VALUE_TYPE_STRING (static_cast(6)) +#define TRACE_VALUE_TYPE_COPY_STRING (static_cast(7)) + +#if RTC_TRACE_EVENTS_ENABLED + +// This header is designed to give you trace_event macros without specifying +// how the events actually get collected and stored. If you need to expose trace +// event to some other universe, you can copy-and-paste this file, +// implement the TRACE_EVENT_API macros, and do any other necessary fixup for +// the target platform. The end result is that multiple libraries can funnel +// events through to a shared trace event collector. + +// Trace events are for tracking application performance and resource usage. +// Macros are provided to track: +// Begin and end of function calls +// Counters +// +// Events are issued against categories. Whereas RTC_LOG's +// categories are statically defined, TRACE categories are created +// implicitly with a string. 
For example: +// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent") +// +// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope: +// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly") +// doSomethingCostly() +// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly") +// Note: our tools can't always determine the correct BEGIN/END pairs unless +// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you +// need them to be in separate scopes. +// +// A common use case is to trace entire function scopes. This +// issues a trace BEGIN and END automatically: +// void doSomethingCostly() { +// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly"); +// ... +// } +// +// Additional parameters can be associated with an event: +// void doSomethingCostly2(int howMuch) { +// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly", +// "howMuch", howMuch); +// ... +// } +// +// The trace system will automatically add to this information the +// current process id, thread id, and a timestamp in microseconds. +// +// To trace an asynchronous procedure such as an IPC send/receive, use +// ASYNC_BEGIN and ASYNC_END: +// [single threaded sender code] +// static int send_count = 0; +// ++send_count; +// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count); +// Send(new MyMessage(send_count)); +// [receive code] +// void OnMyMessage(send_count) { +// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count); +// } +// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs. +// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process. +// Pointers can be used for the ID parameter, and they will be mangled +// internally so that the same pointer on two different processes will not +// match. For example: +// class MyTracedClass { +// public: +// MyTracedClass() { +// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this); +// } +// ~MyTracedClass() { +// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this); +// } +// } +// +// Trace event also supports counters, which is a way to track a quantity +// as it varies over time. Counters are created with the following macro: +// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue); +// +// Counters are process-specific. The macro itself can be issued from any +// thread, however. +// +// Sometimes, you want to track two counters at once. You can do this with two +// counter macros: +// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]); +// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]); +// Or you can do it with a combined macro: +// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter", +// "bytesPinned", g_myCounterValue[0], +// "bytesAllocated", g_myCounterValue[1]); +// This indicates to the tracing UI that these counters should be displayed +// in a single graph, as a summed area chart. +// +// Since counters are in a global namespace, you may want to disembiguate with a +// unique ID, by using the TRACE_COUNTER_ID* variations. +// +// By default, trace collection is compiled in, but turned off at runtime. +// Collecting trace data is the responsibility of the embedding +// application. In Chrome's case, navigating to about:tracing will turn on +// tracing and display data collected across all active processes. +// +// +// Memory scoping note: +// Tracing copies the pointers, not the string content, of the strings passed +// in for category, name, and arg_names. 
Thus, the following code will +// cause problems: +// char* str = strdup("impprtantName"); +// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD! +// free(str); // Trace system now has dangling pointer +// +// To avoid this issue with the `name` and `arg_name` parameters, use the +// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead. +// Notes: The category must always be in a long-lived char* (i.e. static const). +// The `arg_values`, when used, are always deep copied with the _COPY +// macros. +// +// When are string argument values copied: +// const char* arg_values are only referenced by default: +// TRACE_EVENT1("category", "name", +// "arg1", "literal string is only referenced"); +// Use TRACE_STR_COPY to force copying of a const char*: +// TRACE_EVENT1("category", "name", +// "arg1", TRACE_STR_COPY("string will be copied")); +// std::string arg_values are always copied: +// TRACE_EVENT1("category", "name", +// "arg1", std::string("string will be copied")); +// +// +// Thread Safety: +// Thread safety is provided by methods defined in event_tracer.h. See the file +// for details. + +// By default, const char* argument values are assumed to have long-lived scope +// and will not be copied. Use this macro to force a const char* to be copied. +# define TRACE_STR_COPY(str) \ + webrtc::trace_event_internal::TraceStringWithCopy(str) + +// This will mark the trace event as disabled by default. The user will need +// to explicitly enable the event. +# define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name + +// By default, uint64 ID argument values are not mangled with the Process ID in +// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling. +# define TRACE_ID_MANGLE(id) \ + webrtc::trace_event_internal::TraceID::ForceMangle(id) + +// Records a pair of begin and end events called "name" for the current +// scope, with 0, 1 or 2 associated arguments. If the category is not +// enabled, then this does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +# define TRACE_EVENT0(category, name) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name) +# define TRACE_EVENT1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val) +# define TRACE_EVENT2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +// Records a single event called "name" immediately, with 0, 1 or 2 +// associated arguments. If the category is not enabled, then this +// does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. 
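// For instance (a sketch; "MY_SUBSYSTEM" and the variables are placeholders):
//   TRACE_EVENT_INSTANT1("MY_SUBSYSTEM", "CacheMiss", "key", key_id);
//   // Use a COPY variant when the name is not a long-lived literal:
//   std::string dynamicName = BuildEventName();
//   TRACE_EVENT_COPY_INSTANT0("MY_SUBSYSTEM", dynamicName.c_str());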
+# define TRACE_EVENT_INSTANT0(category, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category, name, \ + TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_INSTANT1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +# define TRACE_EVENT_INSTANT2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ + arg2_name, arg2_val) +# define TRACE_EVENT_COPY_INSTANT0(category, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category, name, \ + TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_INSTANT1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +# define TRACE_EVENT_COPY_INSTANT2(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +// Records a single BEGIN event called "name" immediately, with 0, 1 or 2 +// associated arguments. If the category is not enabled, then this +// does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +# define TRACE_EVENT_BEGIN0(category, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category, name, \ + TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_BEGIN1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +# define TRACE_EVENT_BEGIN2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ + arg2_name, arg2_val) +# define TRACE_EVENT_COPY_BEGIN0(category, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category, name, \ + TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_BEGIN1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +# define TRACE_EVENT_COPY_BEGIN2(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +// Records a single END event for "name" immediately. If the category +// is not enabled, then this does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. 
+# define TRACE_EVENT_END0(category, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category, name, \ + TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_END1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +# define TRACE_EVENT_END2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category, name, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \ + arg2_name, arg2_val) +# define TRACE_EVENT_COPY_END0(category, name) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category, name, \ + TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_END1(category, name, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +# define TRACE_EVENT_COPY_END2(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category, name, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ + arg2_name, arg2_val) + +// Records the value of a counter called "name" immediately. Value +// must be representable as a 32 bit integer. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +# define TRACE_COUNTER1(category, name, value) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category, name, \ + TRACE_EVENT_FLAG_NONE, "value", \ + static_cast(value)) +# define TRACE_COPY_COUNTER1(category, name, value) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category, name, \ + TRACE_EVENT_FLAG_COPY, "value", \ + static_cast(value)) + +// Records the values of a multi-parted counter called "name" immediately. +// The UI will treat value1 and value2 as parts of a whole, displaying their +// values as a stacked-bar chart. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +# define TRACE_COUNTER2(category, name, value1_name, value1_val, value2_name, \ + value2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category, name, \ + TRACE_EVENT_FLAG_NONE, value1_name, \ + static_cast(value1_val), value2_name, \ + static_cast(value2_val)) +# define TRACE_COPY_COUNTER2(category, name, value1_name, value1_val, \ + value2_name, value2_val) \ + INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category, name, \ + TRACE_EVENT_FLAG_COPY, value1_name, \ + static_cast(value1_val), value2_name, \ + static_cast(value2_val)) + +// Records the value of a counter called "name" immediately. Value +// must be representable as a 32 bit integer. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +// - `id` is used to disambiguate counters with the same name. It must either +// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits +// will be xored with a hash of the process ID so that the same pointer on +// two different processes will not collide. 
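// For instance, a per-instance counter can pass `this` as the id so that two
// objects emitting the same counter name stay distinct (a sketch; the class
// and member names are placeholders):
//   void JitterBuffer::UpdateStats() {
//     TRACE_COUNTER_ID1("MY_SUBSYSTEM", "queuedFrames", this, queued_frames_);
//   }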
+# define TRACE_COUNTER_ID1(category, name, id, value) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category, \ + name, id, TRACE_EVENT_FLAG_NONE, "value", \ + static_cast(value)) +# define TRACE_COPY_COUNTER_ID1(category, name, id, value) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category, \ + name, id, TRACE_EVENT_FLAG_COPY, "value", \ + static_cast(value)) + +// Records the values of a multi-parted counter called "name" immediately. +// The UI will treat value1 and value2 as parts of a whole, displaying their +// values as a stacked-bar chart. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +// - `id` is used to disambiguate counters with the same name. It must either +// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits +// will be xored with a hash of the process ID so that the same pointer on +// two different processes will not collide. +# define TRACE_COUNTER_ID2(category, name, id, value1_name, value1_val, \ + value2_name, value2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_NONE, \ + value1_name, static_cast(value1_val), value2_name, \ + static_cast(value2_val)) +# define TRACE_COPY_COUNTER_ID2(category, name, id, value1_name, value1_val, \ + value2_name, value2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_COPY, \ + value1_name, static_cast(value1_val), value2_name, \ + static_cast(value2_val)) + +// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2 +// associated arguments. If the category is not enabled, then this +// does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +// - `id` is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC +// events are considered to match if their category, name and id values all +// match. `id` must either be a pointer or an integer value up to 64 bits. If +// it's a pointer, the bits will be xored with a hash of the process ID so +// that the same pointer on two different processes will not collide. +// An asynchronous operation can consist of multiple phases. The first phase is +// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the +// ASYNC_STEP macros. When the operation completes, call ASYNC_END. +// An ASYNC trace typically occur on a single thread (if not, they will only be +// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that +// operation must use the same `name` and `id`. Each event can have its own +// args. 
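// A sketch of a multi-phase asynchronous operation, using the request pointer
// as the id (the Decoder/Request names are placeholders):
//   void Decoder::Start(Request* aRequest) {
//     TRACE_EVENT_ASYNC_BEGIN0("MY_SUBSYSTEM", "Decode", aRequest);
//   }
//   void Decoder::OnHeadersParsed(Request* aRequest) {
//     TRACE_EVENT_ASYNC_STEP0("MY_SUBSYSTEM", "Decode", aRequest, "DecodeBody");
//   }
//   void Decoder::OnDone(Request* aRequest) {
//     TRACE_EVENT_ASYNC_END0("MY_SUBSYSTEM", "Decode", aRequest);
//   }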
+# define TRACE_EVENT_ASYNC_BEGIN0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_ASYNC_BEGIN1(category, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val) +# define TRACE_EVENT_ASYNC_BEGIN2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) +# define TRACE_EVENT_COPY_ASYNC_BEGIN0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_ASYNC_BEGIN1(category, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val) +# define TRACE_EVENT_COPY_ASYNC_BEGIN2(category, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val, arg2_name, arg2_val) + +// Records a single ASYNC_STEP event for `step` immediately. If the category +// is not enabled, then this does nothing. The `name` and `id` must match the +// ASYNC_BEGIN event above. The `step` param identifies this step within the +// async event. This should be called at the beginning of the next phase of an +// asynchronous operation. +# define TRACE_EVENT_ASYNC_STEP0(category, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, category, \ + name, id, TRACE_EVENT_FLAG_NONE, "step", \ + step) +# define TRACE_EVENT_ASYNC_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, category, \ + name, id, TRACE_EVENT_FLAG_NONE, "step", \ + step, arg1_name, arg1_val) +# define TRACE_EVENT_COPY_ASYNC_STEP0(category, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, category, \ + name, id, TRACE_EVENT_FLAG_COPY, "step", \ + step) +# define TRACE_EVENT_COPY_ASYNC_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, category, \ + name, id, TRACE_EVENT_FLAG_COPY, "step", \ + step, arg1_name, arg1_val) + +// Records a single ASYNC_END event for "name" immediately. If the category +// is not enabled, then this does nothing. 
+# define TRACE_EVENT_ASYNC_END0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, category, \ + name, id, TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_ASYNC_END1(category, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val) +# define TRACE_EVENT_ASYNC_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) +# define TRACE_EVENT_COPY_ASYNC_END0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, category, \ + name, id, TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_ASYNC_END1(category, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val) +# define TRACE_EVENT_COPY_ASYNC_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val, arg2_name, arg2_val) + +// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2 +// associated arguments. If the category is not enabled, then this +// does nothing. +// - category and name strings must have application lifetime (statics or +// literals). They may not include " chars. +// - `id` is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW +// events are considered to match if their category, name and id values all +// match. `id` must either be a pointer or an integer value up to 64 bits. If +// it's a pointer, the bits will be xored with a hash of the process ID so +// that the same pointer on two different processes will not collide. +// FLOW events are different from ASYNC events in how they are drawn by the +// tracing UI. A FLOW defines asynchronous data flow, such as posting a task +// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be +// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar +// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined +// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP +// macros. When the operation completes, call FLOW_END. An async operation can +// span threads and processes, but all events in that operation must use the +// same `name` and `id`. Each event can have its own args. 
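// A sketch of tying a posted task back to the code that posted it, using the
// task pointer as the id (the task and task-queue names are placeholders):
//   void PostDecodeTask(Task* aTask) {
//     TRACE_EVENT_FLOW_BEGIN0("MY_SUBSYSTEM", "DecodeTask", aTask);
//     gTaskQueue->Post(aTask);
//   }
//   void RunDecodeTask(Task* aTask) {
//     TRACE_EVENT_FLOW_END0("MY_SUBSYSTEM", "DecodeTask", aTask);
//     aTask->Run();
//   }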
+# define TRACE_EVENT_FLOW_BEGIN0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_FLOW_BEGIN1(category, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val) +# define TRACE_EVENT_FLOW_BEGIN2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) +# define TRACE_EVENT_COPY_FLOW_BEGIN0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_FLOW_BEGIN1(category, name, id, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val) +# define TRACE_EVENT_COPY_FLOW_BEGIN2(category, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val, arg2_name, arg2_val) + +// Records a single FLOW_STEP event for `step` immediately. If the category +// is not enabled, then this does nothing. The `name` and `id` must match the +// FLOW_BEGIN event above. The `step` param identifies this step within the +// async event. This should be called at the beginning of the next phase of an +// asynchronous operation. +# define TRACE_EVENT_FLOW_STEP0(category, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, category, \ + name, id, TRACE_EVENT_FLAG_NONE, "step", \ + step) +# define TRACE_EVENT_FLOW_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, category, \ + name, id, TRACE_EVENT_FLAG_NONE, "step", \ + step, arg1_name, arg1_val) +# define TRACE_EVENT_COPY_FLOW_STEP0(category, name, id, step) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, category, \ + name, id, TRACE_EVENT_FLAG_COPY, "step", \ + step) +# define TRACE_EVENT_COPY_FLOW_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, category, \ + name, id, TRACE_EVENT_FLAG_COPY, "step", \ + step, arg1_name, arg1_val) + +// Records a single FLOW_END event for "name" immediately. If the category +// is not enabled, then this does nothing. 
+# define TRACE_EVENT_FLOW_END0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category, \ + name, id, TRACE_EVENT_FLAG_NONE) +# define TRACE_EVENT_FLOW_END1(category, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val) +# define TRACE_EVENT_FLOW_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category, \ + name, id, TRACE_EVENT_FLAG_NONE, \ + arg1_name, arg1_val, arg2_name, arg2_val) +# define TRACE_EVENT_COPY_FLOW_END0(category, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category, \ + name, id, TRACE_EVENT_FLAG_COPY) +# define TRACE_EVENT_COPY_FLOW_END1(category, name, id, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val) +# define TRACE_EVENT_COPY_FLOW_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category, \ + name, id, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val, arg2_name, arg2_val) + +//////////////////////////////////////////////////////////////////////////////// +// Implementation specific tracing API definitions. + +// Get a pointer to the enabled state of the given trace category. Only +// long-lived literal strings should be given as the category name. The returned +// pointer can be held permanently in a local static for example. If the +// unsigned char is non-zero, tracing is enabled. If tracing is enabled, +// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled +// between the load of the tracing state and the call to +// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out +// for best performance when tracing is disabled. +// const unsigned char* +// TRACE_EVENT_API_GET_CATEGORY_ENABLED(const char* category_name) +# define TRACE_EVENT_API_GET_CATEGORY_ENABLED \ + webrtc::EventTracer::GetCategoryEnabled + +// Add a trace event to the platform tracing system. +// void TRACE_EVENT_API_ADD_TRACE_EVENT( +// char phase, +// const unsigned char* category_enabled, +// const char* name, +// unsigned long long id, +// int num_args, +// const char** arg_names, +// const unsigned char* arg_types, +// const unsigned long long* arg_values, +// unsigned char flags) +# define TRACE_EVENT_API_ADD_TRACE_EVENT MOZ_INTERNAL_UPROFILER_SIMPLE_EVENT + +//////////////////////////////////////////////////////////////////////////////// + +// Implementation detail: trace event macros create temporary variables +// to keep instrumentation overhead low. These macros give each temporary +// variable a unique name based on the line number to prevent name collissions. +# define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b +# define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b) +# define INTERNAL_TRACE_EVENT_UID(name_prefix) \ + INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__) + +# if WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS +# define INTERNAL_TRACE_EVENT_INFO_TYPE const unsigned char* +# else +# define INTERNAL_TRACE_EVENT_INFO_TYPE static const unsigned char* +# endif // WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS + +// Implementation detail: internal macro to create static category. 
+# define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category) \ + INTERNAL_TRACE_EVENT_INFO_TYPE INTERNAL_TRACE_EVENT_UID(catstatic) = \ + reinterpret_cast(category); + +// Implementation detail: internal macro to create static category and add +// event if the category is enabled. +# define INTERNAL_TRACE_EVENT_ADD(phase, category, name, flags, ...) \ + do { \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \ + if (*INTERNAL_TRACE_EVENT_UID(catstatic)) { \ + webrtc::trace_event_internal::AddTraceEvent( \ + phase, INTERNAL_TRACE_EVENT_UID(catstatic), name, \ + webrtc::trace_event_internal::kNoEventId, flags, ##__VA_ARGS__); \ + } \ + } while (0) + +// Implementation detail: internal macro to create static category and add begin +// event if the category is enabled. Also adds the end event when the scope +// ends. +# define INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, ...) \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \ + webrtc::trace_event_internal::TraceEndOnScopeClose \ + INTERNAL_TRACE_EVENT_UID(profileScope); \ + if (*INTERNAL_TRACE_EVENT_UID(catstatic)) { \ + webrtc::trace_event_internal::AddTraceEvent( \ + TRACE_EVENT_PHASE_BEGIN, INTERNAL_TRACE_EVENT_UID(catstatic), name, \ + webrtc::trace_event_internal::kNoEventId, TRACE_EVENT_FLAG_NONE, \ + ##__VA_ARGS__); \ + INTERNAL_TRACE_EVENT_UID(profileScope) \ + .Initialize(INTERNAL_TRACE_EVENT_UID(catstatic), name); \ + } + +// Implementation detail: internal macro to create static category and add +// event if the category is enabled. +# define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category, name, id, flags, \ + ...) \ + do { \ + INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \ + if (*INTERNAL_TRACE_EVENT_UID(catstatic)) { \ + unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \ + webrtc::trace_event_internal::TraceID trace_event_trace_id( \ + id, &trace_event_flags); \ + webrtc::trace_event_internal::AddTraceEvent( \ + phase, INTERNAL_TRACE_EVENT_UID(catstatic), name, \ + trace_event_trace_id.data(), trace_event_flags, ##__VA_ARGS__); \ + } \ + } while (0) + +# ifdef MOZ_GECKO_PROFILER +# define MOZ_INTERNAL_UPROFILER_SIMPLE_EVENT(phase, category_enabled, name, \ + id, num_args, arg_names, \ + arg_types, arg_values, flags) \ + uprofiler_simple_event_marker(name, phase, num_args, arg_names, \ + arg_types, arg_values); +# else +# define MOZ_INTERNAL_UPROFILER_SIMPLE_EVENT(phase, category_enabled, name, \ + id, num_args, arg_names, \ + arg_types, arg_values, flags) +# endif + +// Notes regarding the following definitions: +// New values can be added and propagated to third party libraries, but existing +// definitions must never be changed, because third party libraries may use old +// definitions. + +// Phase indicates the nature of an event entry. E.g. part of a begin/end pair. +# define TRACE_EVENT_PHASE_BEGIN ('B') +# define TRACE_EVENT_PHASE_END ('E') +# define TRACE_EVENT_PHASE_INSTANT ('I') +# define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S') +# define TRACE_EVENT_PHASE_ASYNC_STEP ('T') +# define TRACE_EVENT_PHASE_ASYNC_END ('F') +# define TRACE_EVENT_PHASE_FLOW_BEGIN ('s') +# define TRACE_EVENT_PHASE_FLOW_STEP ('t') +# define TRACE_EVENT_PHASE_FLOW_END ('f') +# define TRACE_EVENT_PHASE_METADATA ('M') +# define TRACE_EVENT_PHASE_COUNTER ('C') + +// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT. 
+# define TRACE_EVENT_FLAG_NONE (static_cast(0)) +# define TRACE_EVENT_FLAG_COPY (static_cast(1 << 0)) +# define TRACE_EVENT_FLAG_HAS_ID (static_cast(1 << 1)) +# define TRACE_EVENT_FLAG_MANGLE_ID (static_cast(1 << 2)) + +namespace webrtc { +namespace trace_event_internal { + +// Specify these values when the corresponding argument of AddTraceEvent is not +// used. +const int kZeroNumArgs = 0; +const unsigned long long kNoEventId = 0; + +// TraceID encapsulates an ID that can either be an integer or pointer. Pointers +// are mangled with the Process ID so that they are unlikely to collide when the +// same pointer is used on different processes. +class TraceID { + public: + class ForceMangle { + public: + explicit ForceMangle(unsigned long long id) : data_(id) {} + explicit ForceMangle(unsigned long id) : data_(id) {} + explicit ForceMangle(unsigned int id) : data_(id) {} + explicit ForceMangle(unsigned short id) : data_(id) {} + explicit ForceMangle(unsigned char id) : data_(id) {} + explicit ForceMangle(long long id) + : data_(static_cast(id)) {} + explicit ForceMangle(long id) + : data_(static_cast(id)) {} + explicit ForceMangle(int id) : data_(static_cast(id)) {} + explicit ForceMangle(short id) + : data_(static_cast(id)) {} + explicit ForceMangle(signed char id) + : data_(static_cast(id)) {} + + unsigned long long data() const { return data_; } + + private: + unsigned long long data_; + }; + + explicit TraceID(const void* id, unsigned char* flags) + : data_( + static_cast(reinterpret_cast(id))) { + *flags |= TRACE_EVENT_FLAG_MANGLE_ID; + } + explicit TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) { + *flags |= TRACE_EVENT_FLAG_MANGLE_ID; + } + explicit TraceID(unsigned long long id, unsigned char* flags) : data_(id) { + (void)flags; + } + explicit TraceID(unsigned long id, unsigned char* flags) : data_(id) { + (void)flags; + } + explicit TraceID(unsigned int id, unsigned char* flags) : data_(id) { + (void)flags; + } + explicit TraceID(unsigned short id, unsigned char* flags) : data_(id) { + (void)flags; + } + explicit TraceID(unsigned char id, unsigned char* flags) : data_(id) { + (void)flags; + } + explicit TraceID(long long id, unsigned char* flags) + : data_(static_cast(id)) { + (void)flags; + } + explicit TraceID(long id, unsigned char* flags) + : data_(static_cast(id)) { + (void)flags; + } + explicit TraceID(int id, unsigned char* flags) + : data_(static_cast(id)) { + (void)flags; + } + explicit TraceID(short id, unsigned char* flags) + : data_(static_cast(id)) { + (void)flags; + } + explicit TraceID(signed char id, unsigned char* flags) + : data_(static_cast(id)) { + (void)flags; + } + + unsigned long long data() const { return data_; } + + private: + unsigned long long data_; +}; + +// Simple union to store various types as unsigned long long. +union TraceValueUnion { + bool as_bool; + unsigned long long as_uint; + long long as_int; + double as_double; + const void* as_pointer; + const char* as_string; +}; + +// Simple container for const char* that should be copied instead of retained. +class TraceStringWithCopy { + public: + explicit TraceStringWithCopy(const char* str) : str_(str) {} + operator const char*() const { return str_; } + + private: + const char* str_; +}; + +// Define SetTraceValue for each allowed type. It stores the type and +// value in the return arguments. This allows this API to avoid declaring any +// structures so that it is portable to third_party libraries. 
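// Conceptually, each overload packs its argument into the 64-bit value slot
// and records a matching type tag; e.g. the double overload generated below
// behaves roughly like this sketch:
//   unsigned char type;
//   unsigned long long value;
//   SetTraceValue(3.14, &type, &value);
//   // type == TRACE_VALUE_TYPE_DOUBLE, and `value` holds the double's bit
//   // pattern (stored through TraceValueUnion) for the backend to decode.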
+# define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \ + value_type_id) \ + static inline void SetTraceValue(actual_type arg, unsigned char* type, \ + unsigned long long* value) { \ + TraceValueUnion type_value; \ + type_value.union_member = arg; \ + *type = value_type_id; \ + *value = type_value.as_uint; \ + } +// Simpler form for int types that can be safely casted. +# define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \ + static inline void SetTraceValue(actual_type arg, unsigned char* type, \ + unsigned long long* value) { \ + *type = value_type_id; \ + *value = static_cast(arg); \ + } + +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long, TRACE_VALUE_TYPE_UINT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT) +INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT) +INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL) +INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE) +INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer, + TRACE_VALUE_TYPE_POINTER) +INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string, + TRACE_VALUE_TYPE_STRING) +INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string, + TRACE_VALUE_TYPE_COPY_STRING) + +# undef INTERNAL_DECLARE_SET_TRACE_VALUE +# undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT + +// std::string version of SetTraceValue so that trace arguments can be strings. +static inline void SetTraceValue(const std::string& arg, unsigned char* type, + unsigned long long* value) { + TraceValueUnion type_value; + type_value.as_string = arg.c_str(); + *type = TRACE_VALUE_TYPE_COPY_STRING; + *value = type_value.as_uint; +} + +// These AddTraceEvent template functions are defined here instead of in the +// macro, because the arg_values could be temporary objects, such as +// std::string. In order to store pointers to the internal c_str and pass +// through to the tracing API, the arg_values must live throughout +// these procedures. 
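In other words, at an illustrative call site (category_enabled and spec are hypothetical names), a temporary std::string argument stays alive for the duration of the AddTraceEvent call defined below, which is long enough for the c_str() pointer captured by SetTraceValue to be consumed by the tracing backend:

  AddTraceEvent(TRACE_EVENT_PHASE_INSTANT, category_enabled, "LoadUrl",
                kNoEventId, TRACE_EVENT_FLAG_COPY,
                "url", std::string(spec));  // temporary lives until the call returns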
+ +static inline void AddTraceEvent(char phase, + const unsigned char* category_enabled, + const char* name, unsigned long long id, + unsigned char flags) { + TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_enabled, name, id, + kZeroNumArgs, nullptr, nullptr, nullptr, + flags); +} + +template +static inline void AddTraceEvent(char phase, + const unsigned char* category_enabled, + const char* name, unsigned long long id, + unsigned char flags, const char* arg1_name, + const ARG1_TYPE& arg1_val) { + const int num_args = 1; + unsigned char arg_types[1]; + unsigned long long arg_values[1]; + SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]); + TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_enabled, name, id, num_args, + &arg1_name, arg_types, arg_values, flags); +} + +template +static inline void AddTraceEvent(char phase, + const unsigned char* category_enabled, + const char* name, unsigned long long id, + unsigned char flags, const char* arg1_name, + const ARG1_TYPE& arg1_val, + const char* arg2_name, + const ARG2_TYPE& arg2_val) { + const int num_args = 2; + const char* arg_names[2] = {arg1_name, arg2_name}; + unsigned char arg_types[2]; + unsigned long long arg_values[2]; + SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]); + SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]); + TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_enabled, name, id, num_args, + arg_names, arg_types, arg_values, flags); +} + +// Used by TRACE_EVENTx macro. Do not use directly. +class TraceEndOnScopeClose { + public: + // Note: members of data_ intentionally left uninitialized. See Initialize. + TraceEndOnScopeClose() : p_data_(nullptr) {} + ~TraceEndOnScopeClose() { + if (p_data_) AddEventIfEnabled(); + } + + void Initialize(const unsigned char* category_enabled, const char* name) { + data_.category_enabled = category_enabled; + data_.name = name; + p_data_ = &data_; + } + + private: + // Add the end event if the category is still enabled. + void AddEventIfEnabled() { + // Only called when p_data_ is non-null. + if (*p_data_->category_enabled) { + TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_END, + p_data_->category_enabled, p_data_->name, + kNoEventId, kZeroNumArgs, nullptr, + nullptr, nullptr, TRACE_EVENT_FLAG_NONE); + } + } + + // This Data struct workaround is to avoid initializing all the members + // in Data during construction of this object, since this object is always + // constructed, even when tracing is disabled. If the members of Data were + // members of this class instead, compiler warnings occur about potential + // uninitialized accesses. + struct Data { + const unsigned char* category_enabled; + const char* name; + }; + Data* p_data_; + Data data_; +}; + +} // namespace trace_event_internal +} // namespace webrtc +#else + +//////////////////////////////////////////////////////////////////////////////// +// This section defines no-op alternatives to the tracing macros when +// RTC_DISABLE_TRACE_EVENTS is defined. 
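The intent of the no-op branch, sketched with a hypothetical call site, is that instrumented code compiles unchanged whether or not tracing is enabled:

  void EncodeFrame() {
    TRACE_EVENT0("webrtc", "EncodeFrame");  // expands to RTC_NOOP(), i.e. do {} while (0),
                                            // when the no-op branch below is selected
    // ... encoding work, with zero tracing overhead ...
  }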
+ +# define RTC_NOOP() \ + do { \ + } while (0) + +# define TRACE_STR_COPY(str) RTC_NOOP() + +# define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name + +# define TRACE_ID_MANGLE(id) 0 + +# define TRACE_EVENT0(category, name) RTC_NOOP() +# define TRACE_EVENT1(category, name, arg1_name, arg1_val) RTC_NOOP() +# define TRACE_EVENT2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_INSTANT0(category, name) RTC_NOOP() +# define TRACE_EVENT_INSTANT1(category, name, arg1_name, arg1_val) RTC_NOOP() + +# define TRACE_EVENT_INSTANT2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_COPY_INSTANT0(category, name) RTC_NOOP() +# define TRACE_EVENT_COPY_INSTANT1(category, name, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_INSTANT2(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_BEGIN0(category, name) RTC_NOOP() +# define TRACE_EVENT_BEGIN1(category, name, arg1_name, arg1_val) RTC_NOOP() +# define TRACE_EVENT_BEGIN2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_BEGIN0(category, name) RTC_NOOP() +# define TRACE_EVENT_COPY_BEGIN1(category, name, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_BEGIN2(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_END0(category, name) RTC_NOOP() +# define TRACE_EVENT_END1(category, name, arg1_name, arg1_val) RTC_NOOP() +# define TRACE_EVENT_END2(category, name, arg1_name, arg1_val, arg2_name, \ + arg2_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_END0(category, name) RTC_NOOP() +# define TRACE_EVENT_COPY_END1(category, name, arg1_name, arg1_val) RTC_NOOP() +# define TRACE_EVENT_COPY_END2(category, name, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_COUNTER1(category, name, value) RTC_NOOP() +# define TRACE_COPY_COUNTER1(category, name, value) RTC_NOOP() + +# define TRACE_COUNTER2(category, name, value1_name, value1_val, value2_name, \ + value2_val) \ + RTC_NOOP() +# define TRACE_COPY_COUNTER2(category, name, value1_name, value1_val, \ + value2_name, value2_val) \ + RTC_NOOP() + +# define TRACE_COUNTER_ID1(category, name, id, value) RTC_NOOP() +# define TRACE_COPY_COUNTER_ID1(category, name, id, value) RTC_NOOP() + +# define TRACE_COUNTER_ID2(category, name, id, value1_name, value1_val, \ + value2_name, value2_val) \ + RTC_NOOP() +# define TRACE_COPY_COUNTER_ID2(category, name, id, value1_name, value1_val, \ + value2_name, value2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_ASYNC_BEGIN0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_ASYNC_BEGIN1(category, name, id, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_ASYNC_BEGIN2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_BEGIN0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_BEGIN1(category, name, id, arg1_name, \ + arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_BEGIN2(category, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_ASYNC_STEP0(category, name, id, step) RTC_NOOP() +# define TRACE_EVENT_ASYNC_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_STEP0(category, name, id, step) RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_STEP1(category, name, id, step, arg1_name, \ + arg1_val) 
\ + RTC_NOOP() + +# define TRACE_EVENT_ASYNC_END0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_ASYNC_END1(category, name, id, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_ASYNC_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_END0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_END1(category, name, id, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_ASYNC_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_FLOW_BEGIN0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_FLOW_BEGIN1(category, name, id, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_FLOW_BEGIN2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_BEGIN0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_BEGIN1(category, name, id, arg1_name, \ + arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_BEGIN2(category, name, id, arg1_name, \ + arg1_val, arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_FLOW_STEP0(category, name, id, step) RTC_NOOP() +# define TRACE_EVENT_FLOW_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_STEP0(category, name, id, step) RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_STEP1(category, name, id, step, arg1_name, \ + arg1_val) \ + RTC_NOOP() + +# define TRACE_EVENT_FLOW_END0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_FLOW_END1(category, name, id, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_FLOW_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_END0(category, name, id) RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_END1(category, name, id, arg1_name, arg1_val) \ + RTC_NOOP() +# define TRACE_EVENT_COPY_FLOW_END2(category, name, id, arg1_name, arg1_val, \ + arg2_name, arg2_val) \ + RTC_NOOP() + +# define TRACE_EVENT_API_GET_CATEGORY_ENABLED "" + +# define TRACE_EVENT_API_ADD_TRACE_EVENT RTC_NOOP() + +#endif // RTC_TRACE_EVENTS_ENABLED + +#endif // GECKO_TRACE_EVENT_H_ diff --git a/tools/profiler/public/MicroGeckoProfiler.h b/tools/profiler/public/MicroGeckoProfiler.h new file mode 100644 index 0000000000..7b735e1eec --- /dev/null +++ b/tools/profiler/public/MicroGeckoProfiler.h @@ -0,0 +1,130 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This contains things related to the Gecko profiler, for use in third_party +// code. It is very minimal and is designed to be used by patching over +// upstream code. +// Only use the C ABI and guard C++ code with #ifdefs, don't pull anything from +// Gecko, it must be possible to include the header file into any C++ codebase. 
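A hypothetical third-party call site, using only the C declarations that follow, might register its worker thread like this (the thread name and the stack-top approximation are illustrative):

  #include "MicroGeckoProfiler.h"

  static void worker_main(void) {
    char stack_marker;  /* address near the top of this thread's stack */
    uprofiler_register_thread("my-worker", &stack_marker);
    /* ... thread work; events emitted here are attributed to "my-worker" ... */
    uprofiler_unregister_thread();
  }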
+ +#ifndef MICRO_GECKO_PROFILER +#define MICRO_GECKO_PROFILER + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#ifdef _WIN32 +# include +#else +# include +#endif + +extern MOZ_EXPORT void uprofiler_register_thread(const char* aName, + void* aGuessStackTop); + +extern MOZ_EXPORT void uprofiler_unregister_thread(); + +extern MOZ_EXPORT void uprofiler_simple_event_marker( + const char* name, char phase, int num_args, const char** arg_names, + const unsigned char* arg_types, const unsigned long long* arg_values); +#ifdef __cplusplus +} + +struct AutoRegisterProfiler { + AutoRegisterProfiler(const char* name, char* stacktop) { + if (getenv("MOZ_UPROFILER_LOG_THREAD_CREATION")) { + printf("### UProfiler: new thread: '%s'\n", name); + } + uprofiler_register_thread(name, stacktop); + } + ~AutoRegisterProfiler() { uprofiler_unregister_thread(); } +}; +#endif // __cplusplus + +void uprofiler_simple_event_marker(const char* name, char phase, int num_args, + const char** arg_names, + const unsigned char* arg_types, + const unsigned long long* arg_values); + +struct UprofilerFuncPtrs { + void (*register_thread)(const char* aName, void* aGuessStackTop); + void (*unregister_thread)(); + void (*simple_event_marker)(const char* name, char phase, int num_args, + const char** arg_names, + const unsigned char* arg_types, + const unsigned long long* arg_values); +}; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-function" + +static void register_thread_noop(const char* aName, void* aGuessStackTop) { + /* no-op */ +} +static void unregister_thread_noop() { /* no-op */ +} +static void simple_event_marker_noop(const char* name, char phase, int num_args, + const char** arg_names, + const unsigned char* arg_types, + const unsigned long long* arg_values) { + /* no-op */ +} + +#pragma GCC diagnostic pop + +#if defined(_WIN32) +# define UPROFILER_OPENLIB() GetModuleHandle(NULL) +#else +# define UPROFILER_OPENLIB() dlopen(NULL, RTLD_NOW) +#endif + +#if defined(_WIN32) +# define UPROFILER_GET_SYM(handle, sym) GetProcAddress(handle, sym) +#else +# define UPROFILER_GET_SYM(handle, sym) dlsym(handle, sym) +#endif + +#if defined(_WIN32) +# define UPROFILER_PRINT_ERROR(func) fprintf(stderr, "%s error\n", #func); +#else +# define UPROFILER_PRINT_ERROR(func) \ + fprintf(stderr, "%s error: %s\n", #func, dlerror()); +#endif + +// Assumes that a variable of type UprofilerFuncPtrs, named uprofiler +// is accessible in the scope +#define UPROFILER_GET_FUNCTIONS() \ + void* handle = UPROFILER_OPENLIB(); \ + if (!handle) { \ + UPROFILER_PRINT_ERROR(UPROFILER_OPENLIB); \ + uprofiler.register_thread = register_thread_noop; \ + uprofiler.unregister_thread = unregister_thread_noop; \ + uprofiler.simple_event_marker = simple_event_marker_noop; \ + } \ + uprofiler.register_thread = \ + UPROFILER_GET_SYM(handle, "uprofiler_register_thread"); \ + if (!uprofiler.register_thread) { \ + UPROFILER_PRINT_ERROR(uprofiler_unregister_thread); \ + uprofiler.register_thread = register_thread_noop; \ + } \ + uprofiler.unregister_thread = \ + UPROFILER_GET_SYM(handle, "uprofiler_unregister_thread"); \ + if (!uprofiler.unregister_thread) { \ + UPROFILER_PRINT_ERROR(uprofiler_unregister_thread); \ + uprofiler.unregister_thread = unregister_thread_noop; \ + } \ + uprofiler.simple_event_marker = \ + UPROFILER_GET_SYM(handle, "uprofiler_simple_event_marker"); \ + if (!uprofiler.simple_event_marker) { \ + UPROFILER_PRINT_ERROR(uprofiler_simple_event_marker); \ + uprofiler.simple_event_marker = 
simple_event_marker_noop; \ + } + +#endif // MICRO_GECKO_PROFILER diff --git a/tools/profiler/public/ProfileAdditionalInformation.h b/tools/profiler/public/ProfileAdditionalInformation.h new file mode 100644 index 0000000000..c4cc8697b0 --- /dev/null +++ b/tools/profiler/public/ProfileAdditionalInformation.h @@ -0,0 +1,90 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// The Gecko Profiler is an always-on profiler that takes fast and low overhead +// samples of the program execution using only userspace functionality for +// portability. The goal of this module is to provide performance data in a +// generic cross-platform way without requiring custom tools or kernel support. +// +// Samples are collected to form a timeline with optional timeline event +// (markers) used for filtering. The samples include both native stacks and +// platform-independent "label stack" frames. + +#ifndef ProfileAdditionalInformation_h +#define ProfileAdditionalInformation_h + +#ifdef MOZ_GECKO_PROFILER +# include "shared-libraries.h" +#endif +#include "js/Value.h" +#include "nsString.h" + +namespace IPC { +class MessageReader; +class MessageWriter; +template +struct ParamTraits; +} // namespace IPC + +namespace mozilla { +// This structure contains additional information gathered while generating the +// profile json and iterating the buffer. +struct ProfileGenerationAdditionalInformation { +#ifdef MOZ_GECKO_PROFILER + ProfileGenerationAdditionalInformation() = default; + explicit ProfileGenerationAdditionalInformation( + const SharedLibraryInfo&& aSharedLibraries) + : mSharedLibraries(aSharedLibraries) {} + + size_t SizeOf() const { return mSharedLibraries.SizeOf(); } + + void Append(ProfileGenerationAdditionalInformation&& aOther) { + mSharedLibraries.AddAllSharedLibraries(aOther.mSharedLibraries); + } + + void FinishGathering() { mSharedLibraries.DeduplicateEntries(); } + + void ToJSValue(JSContext* aCx, JS::MutableHandle aRetVal) const; + + SharedLibraryInfo mSharedLibraries; +#endif // MOZ_GECKO_PROFILER +}; + +struct ProfileAndAdditionalInformation { + ProfileAndAdditionalInformation() = default; + explicit ProfileAndAdditionalInformation(const nsCString&& aProfile) + : mProfile(aProfile) {} + + ProfileAndAdditionalInformation( + const nsCString&& aProfile, + const ProfileGenerationAdditionalInformation&& aAdditionalInformation) + : mProfile(aProfile), + mAdditionalInformation(Some(aAdditionalInformation)) {} + + size_t SizeOf() const { + size_t size = mProfile.Length(); +#ifdef MOZ_GECKO_PROFILER + if (mAdditionalInformation.isSome()) { + size += mAdditionalInformation->SizeOf(); + } +#endif + return size; + } + + nsCString mProfile; + Maybe mAdditionalInformation; +}; +} // namespace mozilla + +namespace IPC { +template <> +struct ParamTraits { + typedef mozilla::ProfileGenerationAdditionalInformation paramType; + + static void Write(MessageWriter* aWriter, const paramType& aParam); + static bool Read(MessageReader* aReader, paramType* aResult); +}; +} // namespace IPC + +#endif // ProfileAdditionalInformation_h diff --git a/tools/profiler/public/ProfileBufferEntrySerializationGeckoExtensions.h b/tools/profiler/public/ProfileBufferEntrySerializationGeckoExtensions.h new file mode 100644 index 0000000000..1578bd2ddc --- /dev/null +++ b/tools/profiler/public/ProfileBufferEntrySerializationGeckoExtensions.h @@ -0,0 +1,160 @@ +/* -*- Mode: 
C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfileBufferEntrySerializationGeckoExtensions_h +#define ProfileBufferEntrySerializationGeckoExtensions_h + +#include "mozilla/ProfileBufferEntrySerialization.h" + +#include "js/AllocPolicy.h" +#include "js/Utility.h" +#include "nsString.h" + +namespace mozilla { + +// ---------------------------------------------------------------------------- +// ns[C]String + +// nsString or nsCString contents are serialized as the number of bytes (encoded +// as ULEB128) and all the characters in the string. The terminal '\0' is +// omitted. +// Make sure you write and read with the same character type! +// +// Usage: `nsCString s = ...; aEW.WriteObject(s);` +template +struct ProfileBufferEntryWriter::Serializer> { + static Length Bytes(const nsTString& aS) { + const auto length = aS.Length(); + return ProfileBufferEntryWriter::ULEB128Size(length) + + static_cast(length * sizeof(CHAR)); + } + + static void Write(ProfileBufferEntryWriter& aEW, const nsTString& aS) { + const auto length = aS.Length(); + aEW.WriteULEB128(length); + // Copy the bytes from the string's buffer. + aEW.WriteBytes(aS.Data(), length * sizeof(CHAR)); + } +}; + +template +struct ProfileBufferEntryReader::Deserializer> { + static void ReadInto(ProfileBufferEntryReader& aER, nsTString& aS) { + aS = Read(aER); + } + + static nsTString Read(ProfileBufferEntryReader& aER) { + const Length length = aER.ReadULEB128(); + nsTString s; + // BulkWrite is the most efficient way to copy bytes into the target string. + auto writerOrErr = s.BulkWrite(length, 0, true); + MOZ_RELEASE_ASSERT(!writerOrErr.isErr()); + + auto writer = writerOrErr.unwrap(); + + aER.ReadBytes(writer.Elements(), length * sizeof(CHAR)); + writer.Finish(length, true); + return s; + } +}; + +// ---------------------------------------------------------------------------- +// nsAuto[C]String + +// nsAuto[C]String contents are serialized as the number of bytes (encoded as +// ULEB128) and all the characters in the string. The terminal '\0' is omitted. +// Make sure you write and read with the same character type! +// +// Usage: `nsAutoCString s = ...; aEW.WriteObject(s);` +template +struct ProfileBufferEntryWriter::Serializer> { + static Length Bytes(const nsTAutoStringN& aS) { + const auto length = aS.Length(); + return ProfileBufferEntryWriter::ULEB128Size(length) + + static_cast(length * sizeof(CHAR)); + } + + static void Write(ProfileBufferEntryWriter& aEW, + const nsTAutoStringN& aS) { + const auto length = aS.Length(); + aEW.WriteULEB128(length); + // Copy the bytes from the string's buffer. + aEW.WriteBytes(aS.BeginReading(), length * sizeof(CHAR)); + } +}; + +template +struct ProfileBufferEntryReader::Deserializer> { + static void ReadInto(ProfileBufferEntryReader& aER, + nsTAutoStringN& aS) { + aS = Read(aER); + } + + static nsTAutoStringN Read(ProfileBufferEntryReader& aER) { + const auto length = aER.ReadULEB128(); + nsTAutoStringN s; + // BulkWrite is the most efficient way to copy bytes into the target string. 
+ auto writerOrErr = s.BulkWrite(length, 0, true); + MOZ_RELEASE_ASSERT(!writerOrErr.isErr()); + + auto writer = writerOrErr.unwrap(); + aER.ReadBytes(writer.Elements(), length * sizeof(CHAR)); + writer.Finish(length, true); + return s; + } +}; + +// ---------------------------------------------------------------------------- +// JS::UniqueChars + +// JS::UniqueChars contents are serialized as the number of bytes (encoded as +// ULEB128) and all the characters in the string. The terminal '\0' is omitted. +// Note: A nullptr pointer will be serialized like an empty string, so when +// deserializing it will result in an allocated buffer only containing a +// single null terminator. +// +// Usage: `JS::UniqueChars s = ...; aEW.WriteObject(s);` +template <> +struct ProfileBufferEntryWriter::Serializer { + static Length Bytes(const JS::UniqueChars& aS) { + if (!aS) { + return ProfileBufferEntryWriter::ULEB128Size(0); + } + const auto len = static_cast(strlen(aS.get())); + return ProfileBufferEntryWriter::ULEB128Size(len) + len; + } + + static void Write(ProfileBufferEntryWriter& aEW, const JS::UniqueChars& aS) { + if (!aS) { + aEW.WriteULEB128(0); + return; + } + const auto len = static_cast(strlen(aS.get())); + aEW.WriteULEB128(len); + aEW.WriteBytes(aS.get(), len); + } +}; + +template <> +struct ProfileBufferEntryReader::Deserializer { + static void ReadInto(ProfileBufferEntryReader& aER, JS::UniqueChars& aS) { + aS = Read(aER); + } + + static JS::UniqueChars Read(ProfileBufferEntryReader& aER) { + const auto len = aER.ReadULEB128(); + // Use the same allocation policy as JS_smprintf. + char* buffer = + static_cast(js::SystemAllocPolicy{}.pod_malloc(len + 1)); + aER.ReadBytes(buffer, len); + buffer[len] = '\0'; + return JS::UniqueChars(buffer); + } +}; + +} // namespace mozilla + +#endif // ProfileBufferEntrySerializationGeckoExtensions_h diff --git a/tools/profiler/public/ProfileJSONWriter.h b/tools/profiler/public/ProfileJSONWriter.h new file mode 100644 index 0000000000..8d23d7a890 --- /dev/null +++ b/tools/profiler/public/ProfileJSONWriter.h @@ -0,0 +1,19 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef PROFILEJSONWRITER_H +#define PROFILEJSONWRITER_H + +#include "mozilla/BaseProfileJSONWriter.h" + +using ChunkedJSONWriteFunc = mozilla::baseprofiler::ChunkedJSONWriteFunc; +using JSONSchemaWriter = mozilla::baseprofiler::JSONSchemaWriter; +using OStreamJSONWriteFunc = mozilla::baseprofiler::OStreamJSONWriteFunc; +using SpliceableChunkedJSONWriter = + mozilla::baseprofiler::SpliceableChunkedJSONWriter; +using SpliceableJSONWriter = mozilla::baseprofiler::SpliceableJSONWriter; +using UniqueJSONStrings = mozilla::baseprofiler::UniqueJSONStrings; + +#endif // PROFILEJSONWRITER_H diff --git a/tools/profiler/public/ProfilerBindings.h b/tools/profiler/public/ProfilerBindings.h new file mode 100644 index 0000000000..096a860130 --- /dev/null +++ b/tools/profiler/public/ProfilerBindings.h @@ -0,0 +1,162 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +/* FFI functions for Profiler Rust API to call into profiler */ + +#ifndef ProfilerBindings_h +#define ProfilerBindings_h + +#include "mozilla/BaseProfilerMarkersPrerequisites.h" + +#include +#include + +namespace mozilla { +class AutoProfilerLabel; +class MarkerSchema; +class MarkerTiming; +class TimeStamp; +enum class StackCaptureOptions; + +namespace baseprofiler { +enum class ProfilingCategoryPair : uint32_t; +class SpliceableJSONWriter; +} // namespace baseprofiler + +} // namespace mozilla + +namespace JS { +enum class ProfilingCategoryPair : uint32_t; +} // namespace JS + +// Everything in here is safe to include unconditionally, implementations must +// take !MOZ_GECKO_PROFILER into account. +extern "C" { + +void gecko_profiler_register_thread(const char* aName); +void gecko_profiler_unregister_thread(); + +void gecko_profiler_construct_label(mozilla::AutoProfilerLabel* aAutoLabel, + JS::ProfilingCategoryPair aCategoryPair); +void gecko_profiler_destruct_label(mozilla::AutoProfilerLabel* aAutoLabel); + +// Construct, clone and destruct the timestamp for profiler time. +void gecko_profiler_construct_timestamp_now(mozilla::TimeStamp* aTimeStamp); +void gecko_profiler_clone_timestamp(const mozilla::TimeStamp* aSrcTimeStamp, + mozilla::TimeStamp* aDestTimeStamp); +void gecko_profiler_destruct_timestamp(mozilla::TimeStamp* aTimeStamp); + +// Addition and subtraction for timestamp. +void gecko_profiler_add_timestamp(const mozilla::TimeStamp* aTimeStamp, + mozilla::TimeStamp* aDestTimeStamp, + double aMicroseconds); +void gecko_profiler_subtract_timestamp(const mozilla::TimeStamp* aTimeStamp, + mozilla::TimeStamp* aDestTimeStamp, + double aMicroseconds); + +// Various MarkerTiming constructors and a destructor. +void gecko_profiler_construct_marker_timing_instant_at( + mozilla::MarkerTiming* aMarkerTiming, const mozilla::TimeStamp* aTime); +void gecko_profiler_construct_marker_timing_instant_now( + mozilla::MarkerTiming* aMarkerTiming); +void gecko_profiler_construct_marker_timing_interval( + mozilla::MarkerTiming* aMarkerTiming, const mozilla::TimeStamp* aStartTime, + const mozilla::TimeStamp* aEndTime); +void gecko_profiler_construct_marker_timing_interval_until_now_from( + mozilla::MarkerTiming* aMarkerTiming, const mozilla::TimeStamp* aStartTime); +void gecko_profiler_construct_marker_timing_interval_start( + mozilla::MarkerTiming* aMarkerTiming, const mozilla::TimeStamp* aTime); +void gecko_profiler_construct_marker_timing_interval_end( + mozilla::MarkerTiming* aMarkerTiming, const mozilla::TimeStamp* aTime); +void gecko_profiler_destruct_marker_timing( + mozilla::MarkerTiming* aMarkerTiming); + +// MarkerSchema constructors and destructor. +void gecko_profiler_construct_marker_schema( + mozilla::MarkerSchema* aMarkerSchema, + const mozilla::MarkerSchema::Location* aLocations, size_t aLength); +void gecko_profiler_construct_marker_schema_with_special_front_end_location( + mozilla::MarkerSchema* aMarkerSchema); +void gecko_profiler_destruct_marker_schema( + mozilla::MarkerSchema* aMarkerSchema); + +// MarkerSchema methods for adding labels. 
+void gecko_profiler_marker_schema_set_chart_label( + mozilla::MarkerSchema* aSchema, const char* aLabel, size_t aLabelLength); +void gecko_profiler_marker_schema_set_tooltip_label( + mozilla::MarkerSchema* aSchema, const char* aLabel, size_t aLabelLength); +void gecko_profiler_marker_schema_set_table_label( + mozilla::MarkerSchema* aSchema, const char* aLabel, size_t aLabelLength); +void gecko_profiler_marker_schema_set_all_labels(mozilla::MarkerSchema* aSchema, + const char* aLabel, + size_t aLabelLength); + +// MarkerSchema methods for adding key/key-label values. +void gecko_profiler_marker_schema_add_key_format( + mozilla::MarkerSchema* aSchema, const char* aKey, size_t aKeyLength, + mozilla::MarkerSchema::Format aFormat); +void gecko_profiler_marker_schema_add_key_label_format( + mozilla::MarkerSchema* aSchema, const char* aKey, size_t aKeyLength, + const char* aLabel, size_t aLabelLength, + mozilla::MarkerSchema::Format aFormat); +void gecko_profiler_marker_schema_add_key_format_searchable( + mozilla::MarkerSchema* aSchema, const char* aKey, size_t aKeyLength, + mozilla::MarkerSchema::Format aFormat, + mozilla::MarkerSchema::Searchable aSearchable); +void gecko_profiler_marker_schema_add_key_label_format_searchable( + mozilla::MarkerSchema* aSchema, const char* aKey, size_t aKeyLength, + const char* aLabel, size_t aLabelLength, + mozilla::MarkerSchema::Format aFormat, + mozilla::MarkerSchema::Searchable aSearchable); +void gecko_profiler_marker_schema_add_static_label_value( + mozilla::MarkerSchema* aSchema, const char* aLabel, size_t aLabelLength, + const char* aValue, size_t aValueLength); + +// Stream MarkerSchema to SpliceableJSONWriter. +void gecko_profiler_marker_schema_stream( + mozilla::baseprofiler::SpliceableJSONWriter* aWriter, const char* aName, + size_t aNameLength, mozilla::MarkerSchema* aMarkerSchema, + void* aStreamedNamesSet); + +// Various SpliceableJSONWriter methods to add properties. +void gecko_profiler_json_writer_int_property( + mozilla::baseprofiler::SpliceableJSONWriter* aWriter, const char* aName, + size_t aNameLength, int64_t aValue); +void gecko_profiler_json_writer_float_property( + mozilla::baseprofiler::SpliceableJSONWriter* aWriter, const char* aName, + size_t aNameLength, double aValue); +void gecko_profiler_json_writer_bool_property( + mozilla::baseprofiler::SpliceableJSONWriter* aWriter, const char* aName, + size_t aNameLength, bool aValue); +void gecko_profiler_json_writer_string_property( + mozilla::baseprofiler::SpliceableJSONWriter* aWriter, const char* aName, + size_t aNameLength, const char* aValue, size_t aValueLength); +void gecko_profiler_json_writer_null_property( + mozilla::baseprofiler::SpliceableJSONWriter* aWriter, const char* aName, + size_t aNameLength); + +// Marker APIs. 
+void gecko_profiler_add_marker_untyped( + const char* aName, size_t aNameLength, + mozilla::baseprofiler::ProfilingCategoryPair aCategoryPair, + mozilla::MarkerTiming* aMarkerTiming, + mozilla::StackCaptureOptions aStackCaptureOptions); +void gecko_profiler_add_marker_text( + const char* aName, size_t aNameLength, + mozilla::baseprofiler::ProfilingCategoryPair aCategoryPair, + mozilla::MarkerTiming* aMarkerTiming, + mozilla::StackCaptureOptions aStackCaptureOptions, const char* aText, + size_t aTextLength); +void gecko_profiler_add_marker( + const char* aName, size_t aNameLength, + mozilla::baseprofiler::ProfilingCategoryPair aCategoryPair, + mozilla::MarkerTiming* aMarkerTiming, + mozilla::StackCaptureOptions aStackCaptureOptions, uint8_t aMarkerTag, + const uint8_t* aPayload, size_t aPayloadSize); + +} // extern "C" + +#endif // ProfilerBindings_h diff --git a/tools/profiler/public/ProfilerChild.h b/tools/profiler/public/ProfilerChild.h new file mode 100644 index 0000000000..a781784aae --- /dev/null +++ b/tools/profiler/public/ProfilerChild.h @@ -0,0 +1,106 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerChild_h +#define ProfilerChild_h + +#include "mozilla/BaseProfilerDetail.h" +#include "mozilla/DataMutex.h" +#include "mozilla/PProfilerChild.h" +#include "mozilla/ProfileBufferControlledChunkManager.h" +#include "mozilla/ProgressLogger.h" +#include "mozilla/RefPtr.h" +#include "ProfileAdditionalInformation.h" + +class nsIThread; +struct PRThread; + +namespace mozilla { + +// The ProfilerChild actor is created in all processes except for the main +// process. The corresponding ProfilerParent actor is created in the main +// process, and it will notify us about profiler state changes and request +// profiles from us. +class ProfilerChild final : public PProfilerChild, + public mozilla::ipc::IShmemAllocator { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ProfilerChild, final) + + ProfilerChild(); + + // Collects and returns a profile. + // This method can be used to grab a profile just before PProfiler is torn + // down. The collected profile should then be sent through a different + // message channel that is guaranteed to stay open long enough. + ProfileAndAdditionalInformation GrabShutdownProfile(); + + void Destroy(); + + // This should be called regularly from outside of the profiler lock. 
+ static void ProcessPendingUpdate(); + + static bool IsLockedOnCurrentThread(); + + private: + virtual ~ProfilerChild(); + + mozilla::ipc::IPCResult RecvStart(const ProfilerInitParams& params, + StartResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvEnsureStarted( + const ProfilerInitParams& params, + EnsureStartedResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvStop(StopResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvPause(PauseResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvResume(ResumeResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvPauseSampling( + PauseSamplingResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvResumeSampling( + ResumeSamplingResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvWaitOnePeriodicSampling( + WaitOnePeriodicSamplingResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvAwaitNextChunkManagerUpdate( + AwaitNextChunkManagerUpdateResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvDestroyReleasedChunksAtOrBefore( + const TimeStamp& aTimeStamp) override; + mozilla::ipc::IPCResult RecvGatherProfile( + GatherProfileResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvGetGatherProfileProgress( + GetGatherProfileProgressResolver&& aResolve) override; + mozilla::ipc::IPCResult RecvClearAllPages() override; + + void ActorDestroy(ActorDestroyReason aActorDestroyReason) override; + + FORWARD_SHMEM_ALLOCATOR_TO(PProfilerChild) + + void SetupChunkManager(); + void ResetChunkManager(); + void ResolveChunkUpdate( + PProfilerChild::AwaitNextChunkManagerUpdateResolver& aResolve); + void ProcessChunkManagerUpdate( + ProfileBufferControlledChunkManager::Update&& aUpdate); + + static void GatherProfileThreadFunction(void* already_AddRefedParameters); + + nsCOMPtr mThread; + bool mDestroyed; + + ProfileBufferControlledChunkManager* mChunkManager = nullptr; + AwaitNextChunkManagerUpdateResolver mAwaitNextChunkManagerUpdateResolver; + ProfileBufferControlledChunkManager::Update mChunkManagerUpdate; + + struct ProfilerChildAndUpdate { + RefPtr mProfilerChild; + ProfileBufferControlledChunkManager::Update mUpdate; + }; + static DataMutexBase + sPendingChunkManagerUpdate; + + RefPtr mGatherProfileProgress; +}; + +} // namespace mozilla + +#endif // ProfilerChild_h diff --git a/tools/profiler/public/ProfilerCodeAddressService.h b/tools/profiler/public/ProfilerCodeAddressService.h new file mode 100644 index 0000000000..9d75c363b3 --- /dev/null +++ b/tools/profiler/public/ProfilerCodeAddressService.h @@ -0,0 +1,52 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerCodeAddressService_h +#define ProfilerCodeAddressService_h + +#include "CodeAddressService.h" +#include "nsTArray.h" + +namespace mozilla { + +// This SymbolTable struct, and the CompactSymbolTable struct in the +// profiler rust module, have the exact same memory layout. +// nsTArray and ThinVec are FFI-compatible, because the thin-vec crate is +// being compiled with the "gecko-ffi" feature enabled. 
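As a usage sketch (aSampledPc and the service's lifetime are assumptions), symbolication of a sampled program counter goes through GetFunction, declared further down in this header; the FFI-compatible SymbolTable it relies on is declared next:

  ProfilerCodeAddressService service;
  nsAutoCString functionName;
  if (service.GetFunction(aSampledPc, functionName)) {
    // functionName now holds the symbol name, resolved through the cached
    // SymbolTable for the library containing aSampledPc.
  }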
+struct SymbolTable { + SymbolTable() = default; + SymbolTable(SymbolTable&& aOther) = default; + + nsTArray mAddrs; + nsTArray mIndex; + nsTArray mBuffer; +}; + +} // namespace mozilla + +/** + * Cache and look up function symbol names. + * + * We don't template this on AllocPolicy since we need to use nsTArray in + * SymbolTable above, which doesn't work with AllocPolicy. (We can't switch + * to Vector, as we would lose FFI compatibility with ThinVec.) + */ +class ProfilerCodeAddressService : public mozilla::CodeAddressService<> { + public: + // Like GetLocation, but only returns the symbol name. + bool GetFunction(const void* aPc, nsACString& aResult); + + private: +#if defined(XP_LINUX) || defined(XP_FREEBSD) + // Map of library names (owned by mLibraryStrings) to SymbolTables filled + // in by profiler_get_symbol_table. + mozilla::HashMap, AllocPolicy> + mSymbolTables; +#endif +}; + +#endif // ProfilerCodeAddressService_h diff --git a/tools/profiler/public/ProfilerControl.h b/tools/profiler/public/ProfilerControl.h new file mode 100644 index 0000000000..466d15eb69 --- /dev/null +++ b/tools/profiler/public/ProfilerControl.h @@ -0,0 +1,190 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// APIs that control the lifetime of the profiler: Initialization, start, pause, +// resume, stop, and shutdown. + +#ifndef ProfilerControl_h +#define ProfilerControl_h + +#include "mozilla/BaseProfilerRAIIMacro.h" + +// Everything in here is also safe to include unconditionally, and only defines +// empty macros if MOZ_GECKO_PROFILER is unset. +// If your file only uses particular APIs (e.g., only markers), please consider +// including only the needed headers instead of this one, to reduce compilation +// dependencies. + +enum class IsFastShutdown { + No, + Yes, +}; + +#ifndef MOZ_GECKO_PROFILER + +// This file can be #included unconditionally. However, everything within this +// file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the +// following macros and functions, which encapsulate the most common operations +// and thus avoid the need for many #ifdefs. + +# define AUTO_PROFILER_INIT ::profiler_init_main_thread_id() +# define AUTO_PROFILER_INIT2 + +// Function stubs for when MOZ_GECKO_PROFILER is not defined. 
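The stubs below keep embedder code building when the profiler is compiled out; a hypothetical initialization sequence is the same in both configurations:

  int main(int argc, char** argv) {
    char stackMarker;
    profiler_init(&stackMarker);  // no-op in non-profiler builds
    // ... run the program ...
    profiler_shutdown();          // likewise a no-op in non-profiler builds
    return 0;
  }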
+ +static inline void profiler_init(void* stackTop) {} + +static inline void profiler_shutdown( + IsFastShutdown aIsFastShutdown = IsFastShutdown::No) {} + +#else // !MOZ_GECKO_PROFILER + +# include "BaseProfiler.h" +# include "mozilla/Attributes.h" +# include "mozilla/Maybe.h" +# include "mozilla/MozPromise.h" +# include "mozilla/PowerOfTwo.h" +# include "mozilla/Vector.h" + +//--------------------------------------------------------------------------- +// Start and stop the profiler +//--------------------------------------------------------------------------- + +static constexpr mozilla::PowerOfTwo32 PROFILER_DEFAULT_ENTRIES = + mozilla::baseprofiler::BASE_PROFILER_DEFAULT_ENTRIES; + +static constexpr mozilla::PowerOfTwo32 PROFILER_DEFAULT_STARTUP_ENTRIES = + mozilla::baseprofiler::BASE_PROFILER_DEFAULT_STARTUP_ENTRIES; + +# define PROFILER_DEFAULT_INTERVAL BASE_PROFILER_DEFAULT_INTERVAL +# define PROFILER_MAX_INTERVAL BASE_PROFILER_MAX_INTERVAL + +# define PROFILER_DEFAULT_ACTIVE_TAB_ID 0 + +// Initialize the profiler. If MOZ_PROFILER_STARTUP is set the profiler will +// also be started. This call must happen before any other profiler calls +// (except profiler_start(), which will call profiler_init() if it hasn't +// already run). +void profiler_init(void* stackTop); +void profiler_init_threadmanager(); + +// Call this as early as possible +# define AUTO_PROFILER_INIT mozilla::AutoProfilerInit PROFILER_RAII +// Call this after the nsThreadManager is Init()ed +# define AUTO_PROFILER_INIT2 mozilla::AutoProfilerInit2 PROFILER_RAII + +// Clean up the profiler module, stopping it if required. This function may +// also save a shutdown profile if requested. No profiler calls should happen +// after this point and all profiling stack labels should have been popped. +void profiler_shutdown(IsFastShutdown aIsFastShutdown = IsFastShutdown::No); + +// Start the profiler -- initializing it first if necessary -- with the +// selected options. Stops and restarts the profiler if it is already active. +// After starting the profiler is "active". The samples will be recorded in a +// circular buffer. +// "aCapacity" is the maximum number of 8-bytes entries in the profiler's +// circular buffer. +// "aInterval" the sampling interval, measured in millseconds. +// "aFeatures" is the feature set. Features unsupported by this +// platform/configuration are ignored. +// "aFilters" is the list of thread filters. Threads that do not match any +// of the filters are not profiled. A filter matches a thread if +// (a) the thread name contains the filter as a case-insensitive +// substring, or +// (b) the filter is of the form "pid:" where n is the process +// id of the process that the thread is running in. +// "aActiveTabID" BrowserId of the active browser screen's active tab. +// It's being used to determine the profiled tab. It's "0" if +// we failed to get the ID. +// "aDuration" is the duration of entries in the profiler's circular buffer. +// Returns as soon as this process' profiler has started, the returned promise +// gets resolved when profilers in sub-processes (if any) have started. +RefPtr profiler_start( + mozilla::PowerOfTwo32 aCapacity, double aInterval, uint32_t aFeatures, + const char** aFilters, uint32_t aFilterCount, uint64_t aActiveTabID, + const mozilla::Maybe& aDuration = mozilla::Nothing()); + +// Stop the profiler and discard the profile without saving it. A no-op if the +// profiler is inactive. After stopping the profiler is "inactive". 
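A minimal start/stop sequence, with illustrative arguments (the zero feature mask and the filter list here are examples, not recommended defaults):

  const char* filters[] = {"GeckoMain", "Compositor"};
  profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL,
                 /* aFeatures */ 0, filters, /* aFilterCount */ 2,
                 PROFILER_DEFAULT_ACTIVE_TAB_ID);
  // ... exercise the code paths of interest ...
  profiler_stop();  // discards the collected profile, as described here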
+// Returns as soon as this process' profiler has stopped, the returned promise +// gets resolved when profilers in sub-processes (if any) have stopped. +RefPtr profiler_stop(); + +// If the profiler is inactive, start it. If it's already active, restart it if +// the requested settings differ from the current settings. Both the check and +// the state change are performed while the profiler state is locked. +// The only difference to profiler_start is that the current buffer contents are +// not discarded if the profiler is already running with the requested settings. +void profiler_ensure_started( + mozilla::PowerOfTwo32 aCapacity, double aInterval, uint32_t aFeatures, + const char** aFilters, uint32_t aFilterCount, uint64_t aActiveTabID, + const mozilla::Maybe& aDuration = mozilla::Nothing()); + +//--------------------------------------------------------------------------- +// Control the profiler +//--------------------------------------------------------------------------- + +// Pause and resume the profiler. No-ops if the profiler is inactive. While +// paused the profile will not take any samples and will not record any data +// into its buffers. The profiler remains fully initialized in this state. +// Timeline markers will still be stored. This feature will keep JavaScript +// profiling enabled, thus allowing toggling the profiler without invalidating +// the JIT. +// Returns as soon as this process' profiler has paused/resumed, the returned +// promise gets resolved when profilers in sub-processes (if any) have +// paused/resumed. +RefPtr profiler_pause(); +RefPtr profiler_resume(); + +// Only pause and resume the periodic sampling loop, including stack sampling, +// counters, and profiling overheads. +// Returns as soon as this process' profiler has paused/resumed sampling, the +// returned promise gets resolved when profilers in sub-processes (if any) have +// paused/resumed sampling. +RefPtr profiler_pause_sampling(); +RefPtr profiler_resume_sampling(); + +//--------------------------------------------------------------------------- +// Get information from the profiler +//--------------------------------------------------------------------------- + +// Get the params used to start the profiler. Returns 0 and an empty vector +// (via outparams) if the profile is inactive. It's possible that the features +// returned may be slightly different to those requested due to required +// adjustments. 
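For example, a caller might query the active configuration like this (the exact template parameters of the out-params are assumptions here, intended to match the declaration below):

  int entries = 0;
  mozilla::Maybe<double> duration;
  double interval = 0.0;
  uint32_t features = 0;
  mozilla::Vector<const char*> filters;
  uint64_t activeTabID = 0;
  profiler_get_start_params(&entries, &duration, &interval, &features,
                            &filters, &activeTabID);
  // With an inactive profiler this reports 0 entries and an empty filter list.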
+void profiler_get_start_params( + int* aEntrySize, mozilla::Maybe* aDuration, double* aInterval, + uint32_t* aFeatures, + mozilla::Vector* aFilters, + uint64_t* aActiveTabID); + +//--------------------------------------------------------------------------- +// RAII classes +//--------------------------------------------------------------------------- + +namespace mozilla { + +class MOZ_RAII AutoProfilerInit { + public: + explicit AutoProfilerInit() { profiler_init(this); } + + ~AutoProfilerInit() { profiler_shutdown(); } + + private: +}; + +class MOZ_RAII AutoProfilerInit2 { + public: + explicit AutoProfilerInit2() { profiler_init_threadmanager(); } + + private: +}; + +} // namespace mozilla + +#endif // !MOZ_GECKO_PROFILER + +#endif // ProfilerControl_h diff --git a/tools/profiler/public/ProfilerCounts.h b/tools/profiler/public/ProfilerCounts.h new file mode 100644 index 0000000000..86f6cbfe4f --- /dev/null +++ b/tools/profiler/public/ProfilerCounts.h @@ -0,0 +1,296 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerCounts_h +#define ProfilerCounts_h + +#ifndef MOZ_GECKO_PROFILER + +# define PROFILER_DEFINE_COUNT_TOTAL(label, category, description) +# define PROFILER_DEFINE_COUNT(label, category, description) +# define PROFILER_DEFINE_STATIC_COUNT_TOTAL(label, category, description) +# define AUTO_PROFILER_TOTAL(label, count) +# define AUTO_PROFILER_COUNT(label) +# define AUTO_PROFILER_STATIC_COUNT(label, count) + +#else + +# include "mozilla/Atomics.h" + +class BaseProfilerCount; +void profiler_add_sampled_counter(BaseProfilerCount* aCounter); +void profiler_remove_sampled_counter(BaseProfilerCount* aCounter); + +typedef mozilla::Atomic + ProfilerAtomicSigned; +typedef mozilla::Atomic + ProfilerAtomicUnsigned; + +// Counter support +// There are two types of counters: +// 1) a simple counter which can be added to or subtracted from. This could +// track the number of objects of a type, the number of calls to something +// (reflow, JIT, etc). +// 2) a combined counter which has the above, plus a number-of-calls counter +// that is incremented by 1 for each call to modify the count. This provides +// an optional source for a 'heatmap' of access. This can be used (for +// example) to track the amount of memory allocated, and provide a heatmap of +// memory operations (allocs/frees). +// +// Counters are sampled by the profiler once per sample-period. At this time, +// all counters are global to the process. In the future, there might be more +// versions with per-thread or other discriminators. +// +// Typical usage: +// There are two ways to use counters: With heap-created counter objects, +// or using macros. Note: the macros use statics, and will be slightly +// faster/smaller, and you need to care about creating them before using +// them. They're similar to the use-pattern for the other AUTO_PROFILER* +// macros, but they do need the PROFILER_DEFINE* to be use to instantiate +// the statics. +// +// PROFILER_DEFINE_COUNT(mything, "JIT", "Some JIT byte count") +// ... +// void foo() { ... AUTO_PROFILER_COUNT(mything, number_of_bytes_used); ... } +// +// or (to also get a heatmap) +// +// PROFILER_DEFINE_COUNT_TOTAL(mything, "JIT", "Some JIT byte count") +// ... +// void foo() { +// ... 
+// AUTO_PROFILER_COUNT_TOTAL(mything, number_of_bytes_generated); +// ... +// } +// +// To use without statics/macros: +// +// UniquePtr myCounter; +// ... +// myCounter = +// MakeUnique("mything", "JIT", "Some JIT byte count")); +// ... +// void foo() { ... myCounter->Add(number_of_bytes_generated0; ... } + +class BaseProfilerCount { + public: + BaseProfilerCount(const char* aLabel, ProfilerAtomicSigned* aCounter, + ProfilerAtomicUnsigned* aNumber, const char* aCategory, + const char* aDescription) + : mLabel(aLabel), + mCategory(aCategory), + mDescription(aDescription), + mCounter(aCounter), + mNumber(aNumber) { +# define COUNTER_CANARY 0xDEADBEEF +# ifdef DEBUG + mCanary = COUNTER_CANARY; + mPrevNumber = 0; +# endif + // Can't call profiler_* here since this may be non-xul-library + } + + virtual ~BaseProfilerCount() { +# ifdef DEBUG + mCanary = 0; +# endif + } + + struct CountSample { + int64_t count; + uint64_t number; + // This field indicates if the sample has already been consummed by a call + // to the Sample() method. This allows the profiler to discard duplicate + // samples if the counter sampling rate is lower than the profiler sampling + // rate. This can happen for example with some power meters that sample up + // to every 10ms. + // It should always be true when calling Sample() for the first time. + bool isSampleNew; + }; + virtual CountSample Sample() { + MOZ_ASSERT(mCanary == COUNTER_CANARY); + + CountSample result; + result.count = *mCounter; + result.number = mNumber ? *mNumber : 0; +# ifdef DEBUG + MOZ_ASSERT(result.number >= mPrevNumber); + mPrevNumber = result.number; +# endif + result.isSampleNew = true; + return result; + } + + void Clear() { + *mCounter = 0; + // We don't reset *mNumber or mPrevNumber. We encode numbers as + // positive deltas, and currently we only care about the deltas (for + // e.g. heatmaps). If we ever need to clear mNumber as well, we can an + // alternative method (Reset()) to do so. + } + + // We don't define ++ and Add() here, since the static defines directly + // increment the atomic counters, and the subclasses implement ++ and + // Add() directly. + + // These typically are static strings (for example if you use the macros + // below) + const char* mLabel; + const char* mCategory; + const char* mDescription; + // We're ok with these being un-ordered in race conditions. These are + // pointers because we want to be able to use statics and increment them + // directly. Otherwise we could just have them inline, and not need the + // constructor args. + // These can be static globals (using the macros below), though they + // don't have to be - their lifetime must be longer than the use of them + // by the profiler (see profiler_add/remove_sampled_counter()). If you're + // using a lot of these, they probably should be allocated at runtime (see + // class ProfilerCountOnly below). 
+ ProfilerAtomicSigned* mCounter; + ProfilerAtomicUnsigned* mNumber; // may be null + +# ifdef DEBUG + uint32_t mCanary; + uint64_t mPrevNumber; // value of number from the last Sample() +# endif +}; + +// Designed to be allocated dynamically, and simply incremented with obj++ +// or obj->Add(n) +class ProfilerCounter final : public BaseProfilerCount { + public: + ProfilerCounter(const char* aLabel, const char* aCategory, + const char* aDescription) + : BaseProfilerCount(aLabel, &mCounter, nullptr, aCategory, aDescription) { + // Assume we're in libxul + profiler_add_sampled_counter(this); + } + + virtual ~ProfilerCounter() { profiler_remove_sampled_counter(this); } + + BaseProfilerCount& operator++() { + Add(1); + return *this; + } + + void Add(int64_t aNumber) { mCounter += aNumber; } + + ProfilerAtomicSigned mCounter; +}; + +// Also keeps a heatmap (number of calls to ++/Add()) +class ProfilerCounterTotal final : public BaseProfilerCount { + public: + ProfilerCounterTotal(const char* aLabel, const char* aCategory, + const char* aDescription) + : BaseProfilerCount(aLabel, &mCounter, &mNumber, aCategory, + aDescription) { + // Assume we're in libxul + profiler_add_sampled_counter(this); + } + + virtual ~ProfilerCounterTotal() { profiler_remove_sampled_counter(this); } + + BaseProfilerCount& operator++() { + Add(1); + return *this; + } + + void Add(int64_t aNumber) { + mCounter += aNumber; + mNumber++; + } + + ProfilerAtomicSigned mCounter; + ProfilerAtomicUnsigned mNumber; +}; + +// Defines a counter that is sampled on each profiler tick, with a running +// count (signed), and number-of-instances. Note that because these are two +// independent Atomics, there is a possiblity that count will not include +// the last call, but number of uses will. I think this is not worth +// worrying about +# define PROFILER_DEFINE_COUNT_TOTAL(label, category, description) \ + ProfilerAtomicSigned profiler_count_##label(0); \ + ProfilerAtomicUnsigned profiler_number_##label(0); \ + const char profiler_category_##label[] = category; \ + const char profiler_description_##label[] = description; \ + mozilla::UniquePtr AutoCount_##label; + +// This counts, but doesn't keep track of the number of calls to +// AUTO_PROFILER_COUNT() +# define PROFILER_DEFINE_COUNT(label, category, description) \ + ProfilerAtomicSigned profiler_count_##label(0); \ + const char profiler_category_##label[] = category; \ + const char profiler_description_##label[] = description; \ + mozilla::UniquePtr AutoCount_##label; + +// This will create a static initializer if used, but avoids a possible +// allocation. +# define PROFILER_DEFINE_STATIC_COUNT_TOTAL(label, category, description) \ + ProfilerAtomicSigned profiler_count_##label(0); \ + ProfilerAtomicUnsigned profiler_number_##label(0); \ + BaseProfilerCount AutoCount_##label(#label, &profiler_count_##label, \ + &profiler_number_##label, category, \ + description); + +// If we didn't care about static initializers, we could avoid the need for +// a ptr to the BaseProfilerCount object. + +// XXX It would be better to do this without the if() and without the +// theoretical race to set the UniquePtr (i.e. possible leak). 
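Tying the pieces together with hypothetical names, the intended pattern from the usage notes above pairs a file-scope DEFINE with the AUTO macro (defined next) in the hot path:

  PROFILER_DEFINE_COUNT_TOTAL(jit_bytes, "JIT", "Bytes of JIT code generated")

  void EmitCode(size_t aBytes) {
    AUTO_PROFILER_COUNT_TOTAL(jit_bytes, aBytes);
    // ... generate aBytes bytes of code ...
  }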
+# define AUTO_PROFILER_COUNT_TOTAL(label, count) \ + do { \ + profiler_number_##label++; /* do this first*/ \ + profiler_count_##label += count; \ + if (!AutoCount_##label) { \ + /* Ignore that we could call this twice in theory, and that we leak \ + * them \ + */ \ + AutoCount_##label.reset(new BaseProfilerCount( \ + #label, &profiler_count_##label, &profiler_number_##label, \ + profiler_category_##label, profiler_description_##label)); \ + profiler_add_sampled_counter(AutoCount_##label.get()); \ + } \ + } while (0) + +# define AUTO_PROFILER_COUNT(label, count) \ + do { \ + profiler_count_##label += count; /* do this first*/ \ + if (!AutoCount_##label) { \ + /* Ignore that we could call this twice in theory, and that we leak \ + * them \ + */ \ + AutoCount_##label.reset(new BaseProfilerCount( \ + #label, nullptr, &profiler_number_##label, \ + profiler_category_##label, profiler_description_##label)); \ + profiler_add_sampled_counter(AutoCount_##label.get()); \ + } \ + } while (0) + +# define AUTO_PROFILER_STATIC_COUNT(label, count) \ + do { \ + profiler_number_##label++; /* do this first*/ \ + profiler_count_##label += count; \ + } while (0) + +// if we need to force the allocation +# define AUTO_PROFILER_FORCE_ALLOCATION(label) \ + do { \ + if (!AutoCount_##label) { \ + /* Ignore that we could call this twice in theory, and that we leak \ + * them \ + */ \ + AutoCount_##label.reset(new BaseProfilerCount( \ + #label, &profiler_count_##label, &profiler_number_##label, \ + profiler_category_##label, profiler_description_##label)); \ + } \ + } while (0) + +#endif // !MOZ_GECKO_PROFILER + +#endif // ProfilerCounts_h diff --git a/tools/profiler/public/ProfilerLabels.h b/tools/profiler/public/ProfilerLabels.h new file mode 100644 index 0000000000..f05e357451 --- /dev/null +++ b/tools/profiler/public/ProfilerLabels.h @@ -0,0 +1,268 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This header contains all definitions related to profiler labels. +// It is safe to include unconditionally, and only defines empty macros if +// MOZ_GECKO_PROFILER is not set. + +#ifndef ProfilerLabels_h +#define ProfilerLabels_h + +#include "mozilla/ProfilerThreadState.h" + +#include "js/ProfilingCategory.h" +#include "js/ProfilingStack.h" +#include "js/RootingAPI.h" +#include "mozilla/Assertions.h" +#include "mozilla/Atomics.h" +#include "mozilla/Attributes.h" +#include "mozilla/BaseProfilerRAIIMacro.h" +#include "mozilla/Maybe.h" +#include "mozilla/ProfilerThreadRegistration.h" +#include "mozilla/ThreadLocal.h" +#include "nsString.h" + +#include + +struct JSContext; + +// Insert an RAII object in this scope to enter a label stack frame. Any +// samples collected in this scope will contain this label in their stack. +// The label argument must be a static C string. It is usually of the +// form "ClassName::FunctionName". (Ideally we'd use the compiler to provide +// that for us, but __func__ gives us the function name without the class +// name.) If the label applies to only part of a function, you can qualify it +// like this: "ClassName::FunctionName:PartName". +// +// Use AUTO_PROFILER_LABEL_DYNAMIC_* if you want to add additional / dynamic +// information to the label stack frame. 
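A hypothetical instrumented method looks like this (the OTHER category pair is chosen only for illustration); the macro definitions follow:

  void nsMyClass::DoWork() {
    AUTO_PROFILER_LABEL("nsMyClass::DoWork", OTHER);
    // Samples taken anywhere in this scope carry the "nsMyClass::DoWork" frame.
  }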
+#define AUTO_PROFILER_LABEL(label, categoryPair) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + label, nullptr, JS::ProfilingCategoryPair::categoryPair) + +// Similar to AUTO_PROFILER_LABEL, but that adds the RELEVANT_FOR_JS flag. +#define AUTO_PROFILER_LABEL_RELEVANT_FOR_JS(label, categoryPair) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + label, nullptr, JS::ProfilingCategoryPair::categoryPair, \ + uint32_t(js::ProfilingStackFrame::Flags::RELEVANT_FOR_JS)) + +// Similar to AUTO_PROFILER_LABEL, but with only one argument: the category +// pair. The label string is taken from the category pair. This is convenient +// for labels like AUTO_PROFILER_LABEL_CATEGORY_PAIR(GRAPHICS_LayerBuilding) +// which would otherwise just repeat the string. +#define AUTO_PROFILER_LABEL_CATEGORY_PAIR(categoryPair) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + "", nullptr, JS::ProfilingCategoryPair::categoryPair, \ + uint32_t( \ + js::ProfilingStackFrame::Flags::LABEL_DETERMINED_BY_CATEGORY_PAIR)) + +// Similar to AUTO_PROFILER_LABEL_CATEGORY_PAIR but adding the RELEVANT_FOR_JS +// flag. +#define AUTO_PROFILER_LABEL_CATEGORY_PAIR_RELEVANT_FOR_JS(categoryPair) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + "", nullptr, JS::ProfilingCategoryPair::categoryPair, \ + uint32_t( \ + js::ProfilingStackFrame::Flags::LABEL_DETERMINED_BY_CATEGORY_PAIR) | \ + uint32_t(js::ProfilingStackFrame::Flags::RELEVANT_FOR_JS)) + +// Similar to AUTO_PROFILER_LABEL, but with an additional string. The inserted +// RAII object stores the cStr pointer in a field; it does not copy the string. +// +// WARNING: This means that the string you pass to this macro needs to live at +// least until the end of the current scope. Be careful using this macro with +// ns[C]String; the other AUTO_PROFILER_LABEL_DYNAMIC_* macros below are +// preferred because they avoid this problem. +// +// If the profiler samples the current thread and walks the label stack while +// this RAII object is on the stack, it will copy the supplied string into the +// profile buffer. So there's one string copy operation, and it happens at +// sample time. +// +// Compare this to the plain AUTO_PROFILER_LABEL macro, which only accepts +// literal strings: When the label stack frames generated by +// AUTO_PROFILER_LABEL are sampled, no string copy needs to be made because the +// profile buffer can just store the raw pointers to the literal strings. +// Consequently, AUTO_PROFILER_LABEL frames take up considerably less space in +// the profile buffer than AUTO_PROFILER_LABEL_DYNAMIC_* frames. +#define AUTO_PROFILER_LABEL_DYNAMIC_CSTR(label, categoryPair, cStr) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + label, cStr, JS::ProfilingCategoryPair::categoryPair) + +// Like AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but with the NONSENSITIVE flag to +// note that it does not contain sensitive information (so we can include it +// in, for example, the BackgroundHangMonitor) +#define AUTO_PROFILER_LABEL_DYNAMIC_CSTR_NONSENSITIVE(label, categoryPair, \ + cStr) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + label, cStr, JS::ProfilingCategoryPair::categoryPair, \ + uint32_t(js::ProfilingStackFrame::Flags::NONSENSITIVE)) + +// Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an nsACString. +// +// Note: The use of the Maybe<>s ensures the scopes for the dynamic string and +// the AutoProfilerLabel are appropriate, while also not incurring the runtime +// cost of the string assignment unless the profiler is active. 
Therefore, +// unlike AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC_CSTR, this macro +// doesn't push/pop a label when the profiler is inactive. +#define AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(label, categoryPair, nsCStr) \ + mozilla::Maybe autoCStr; \ + mozilla::Maybe raiiObjectNsCString; \ + if (profiler_is_active()) { \ + autoCStr.emplace(nsCStr); \ + raiiObjectNsCString.emplace(label, autoCStr->get(), \ + JS::ProfilingCategoryPair::categoryPair); \ + } + +#define AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING_RELEVANT_FOR_JS( \ + label, categoryPair, nsCStr) \ + mozilla::Maybe autoCStr; \ + mozilla::Maybe raiiObjectNsCString; \ + if (profiler_is_active()) { \ + autoCStr.emplace(nsCStr); \ + raiiObjectNsCString.emplace( \ + label, autoCStr->get(), JS::ProfilingCategoryPair::categoryPair, \ + uint32_t(js::ProfilingStackFrame::Flags::RELEVANT_FOR_JS)); \ + } + +// Match the conditions for MOZ_ENABLE_BACKGROUND_HANG_MONITOR +#if defined(NIGHTLY_BUILD) && !defined(MOZ_DEBUG) && !defined(MOZ_TSAN) && \ + !defined(MOZ_ASAN) +# define SHOULD_CREATE_ALL_NONSENSITIVE_LABEL_FRAMES true +#else +# define SHOULD_CREATE_ALL_NONSENSITIVE_LABEL_FRAMES profiler_is_active() +#endif + +// See note above AUTO_PROFILER_LABEL_DYNAMIC_CSTR_NONSENSITIVE +#define AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING_NONSENSITIVE( \ + label, categoryPair, nsCStr) \ + mozilla::Maybe autoCStr; \ + mozilla::Maybe raiiObjectNsCString; \ + if (SHOULD_CREATE_ALL_NONSENSITIVE_LABEL_FRAMES) { \ + autoCStr.emplace(nsCStr); \ + raiiObjectNsCString.emplace( \ + label, autoCStr->get(), JS::ProfilingCategoryPair::categoryPair, \ + uint32_t(js::ProfilingStackFrame::Flags::NONSENSITIVE)); \ + } + +// Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an nsString that is +// is lossily converted to an ASCII string. +// +// Note: The use of the Maybe<>s ensures the scopes for the converted dynamic +// string and the AutoProfilerLabel are appropriate, while also not incurring +// the runtime cost of the string conversion unless the profiler is active. +// Therefore, unlike AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC_CSTR, +// this macro doesn't push/pop a label when the profiler is inactive. +#define AUTO_PROFILER_LABEL_DYNAMIC_LOSSY_NSSTRING(label, categoryPair, nsStr) \ + mozilla::Maybe asciiStr; \ + mozilla::Maybe raiiObjectLossyNsString; \ + if (profiler_is_active()) { \ + asciiStr.emplace(nsStr); \ + raiiObjectLossyNsString.emplace(label, asciiStr->get(), \ + JS::ProfilingCategoryPair::categoryPair); \ + } + +// Similar to AUTO_PROFILER_LABEL, but accepting a JSContext* parameter, and a +// no-op if the profiler is disabled. +// Used to annotate functions for which overhead in the range of nanoseconds is +// noticeable. It avoids overhead from the TLS lookup because it can get the +// ProfilingStack from the JS context, and avoids almost all overhead in the +// case where the profiler is disabled. +#define AUTO_PROFILER_LABEL_FAST(label, categoryPair, ctx) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + ctx, label, nullptr, JS::ProfilingCategoryPair::categoryPair) + +// Similar to AUTO_PROFILER_LABEL_FAST, but also takes an extra string and an +// additional set of flags. The flags parameter should carry values from the +// js::ProfilingStackFrame::Flags enum. 
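// A sketch of the dynamic-string variant above, assuming xul code; the
// function, category pair (NETWORK) and parameter are hypothetical. The extra
// nsCString copy and the label frame only happen while the profiler is active.
void FetchResource(const nsACString& aURL) {
  AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING("FetchResource", NETWORK, aURL);
  // ... network work; sampled stacks show the label plus the URL ...
}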
+#define AUTO_PROFILER_LABEL_DYNAMIC_FAST(label, dynamicString, categoryPair, \ + ctx, flags) \ + mozilla::AutoProfilerLabel PROFILER_RAII( \ + ctx, label, dynamicString, JS::ProfilingCategoryPair::categoryPair, \ + flags) + +namespace mozilla { + +#ifndef MOZ_GECKO_PROFILER + +class MOZ_RAII AutoProfilerLabel { + public: + // This is the AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC variant. + AutoProfilerLabel(const char* aLabel, const char* aDynamicString, + JS::ProfilingCategoryPair aCategoryPair, + uint32_t aFlags = 0) {} + + // This is the AUTO_PROFILER_LABEL_FAST variant. + AutoProfilerLabel(JSContext* aJSContext, const char* aLabel, + const char* aDynamicString, + JS::ProfilingCategoryPair aCategoryPair, uint32_t aFlags) {} + + ~AutoProfilerLabel() {} +}; + +#else // !MOZ_GECKO_PROFILER + +// This class creates a non-owning ProfilingStack reference. Objects of this +// class are stack-allocated, and so exist within a thread, and are thus bounded +// by the lifetime of the thread, which ensures that the references held can't +// be used after the ProfilingStack is destroyed. +class MOZ_RAII AutoProfilerLabel { + public: + // This is the AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC variant. + AutoProfilerLabel(const char* aLabel, const char* aDynamicString, + JS::ProfilingCategoryPair aCategoryPair, + uint32_t aFlags = 0) { + // Get the ProfilingStack from TLS. + ProfilingStack* profilingStack = + profiler::ThreadRegistration::WithOnThreadRefOr( + [](profiler::ThreadRegistration::OnThreadRef aThread) { + return &aThread.UnlockedConstReaderAndAtomicRWRef() + .ProfilingStackRef(); + }, + nullptr); + Push(profilingStack, aLabel, aDynamicString, aCategoryPair, aFlags); + } + + // This is the AUTO_PROFILER_LABEL_FAST variant. It retrieves the + // ProfilingStack from the JSContext and does nothing if the profiler is + // inactive. + AutoProfilerLabel(JSContext* aJSContext, const char* aLabel, + const char* aDynamicString, + JS::ProfilingCategoryPair aCategoryPair, uint32_t aFlags) { + Push(js::GetContextProfilingStackIfEnabled(aJSContext), aLabel, + aDynamicString, aCategoryPair, aFlags); + } + + void Push(ProfilingStack* aProfilingStack, const char* aLabel, + const char* aDynamicString, JS::ProfilingCategoryPair aCategoryPair, + uint32_t aFlags = 0) { + // This function runs both on and off the main thread. + + mProfilingStack = aProfilingStack; + if (mProfilingStack) { + mProfilingStack->pushLabelFrame(aLabel, aDynamicString, this, + aCategoryPair, aFlags); + } + } + + ~AutoProfilerLabel() { + // This function runs both on and off the main thread. + + if (mProfilingStack) { + mProfilingStack->pop(); + } + } + + private: + // We save a ProfilingStack pointer in the ctor so we don't have to redo the + // TLS lookup in the dtor. + ProfilingStack* mProfilingStack; +}; + +#endif // !MOZ_GECKO_PROFILER + +} // namespace mozilla + +#endif // ProfilerLabels_h diff --git a/tools/profiler/public/ProfilerMarkerTypes.h b/tools/profiler/public/ProfilerMarkerTypes.h new file mode 100644 index 0000000000..0868c70e30 --- /dev/null +++ b/tools/profiler/public/ProfilerMarkerTypes.h @@ -0,0 +1,41 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef ProfilerMarkerTypes_h +#define ProfilerMarkerTypes_h + +// This header contains common marker type definitions that rely on xpcom. +// +// It #include's "mozilla/BaseProfilerMarkerTypes.h" and "ProfilerMarkers.h", +// see these files for more marker types, how to define other marker types, and +// how to add markers to the profiler buffers. + +// !!! /!\ WORK IN PROGRESS /!\ !!! +// This file contains draft marker definitions, but most are not used yet. +// Further work is needed to complete these definitions, and use them to convert +// existing PROFILER_ADD_MARKER calls. See meta bug 1661394. + +#include "mozilla/BaseProfilerMarkerTypes.h" +#include "mozilla/ProfilerMarkers.h" +#include "js/ProfilingFrameIterator.h" +#include "js/Utility.h" +#include "mozilla/Preferences.h" +#include "mozilla/ServoTraversalStatistics.h" + +namespace geckoprofiler::markers { + +// Import some common markers from mozilla::baseprofiler::markers. +using MediaSampleMarker = mozilla::baseprofiler::markers::MediaSampleMarker; +using VideoFallingBehindMarker = + mozilla::baseprofiler::markers::VideoFallingBehindMarker; +using ContentBuildMarker = mozilla::baseprofiler::markers::ContentBuildMarker; +using MediaEngineMarker = mozilla::baseprofiler::markers::MediaEngineMarker; +using MediaEngineTextMarker = + mozilla::baseprofiler::markers::MediaEngineTextMarker; + +} // namespace geckoprofiler::markers + +#endif // ProfilerMarkerTypes_h diff --git a/tools/profiler/public/ProfilerMarkers.h b/tools/profiler/public/ProfilerMarkers.h new file mode 100644 index 0000000000..ca53c3f189 --- /dev/null +++ b/tools/profiler/public/ProfilerMarkers.h @@ -0,0 +1,355 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// Markers are useful to delimit something important happening such as the first +// paint. Unlike labels, which are only recorded in the profile buffer if a +// sample is collected while the label is on the label stack, markers will +// always be recorded in the profile buffer. +// +// This header contains definitions necessary to add markers to the Gecko +// Profiler buffer. +// +// It #include's "mozilla/BaseProfilerMarkers.h", see that header for base +// definitions necessary to create marker types. +// +// If common marker types are needed, #include "ProfilerMarkerTypes.h" instead. +// +// But if you want to create your own marker type locally, you can #include this +// header only; look at ProfilerMarkerTypes.h for examples of how to define +// types. +// +// To then record markers: +// - Use `baseprofiler::AddMarker(...)` from mozglue or other libraries that are +// outside of xul, especially if they may happen outside of xpcom's lifetime +// (typically startup, shutdown, or tests). +// - Otherwise #include "ProfilerMarkers.h" instead, and use +// `profiler_add_marker(...)`. +// See these functions for more details.
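// A small sketch of recording markers from xul code with the macros defined
// further down in this header; the marker names, categories (GRAPHICS, DOM)
// and OnFirstContentfulPaint() are hypothetical.
void OnFirstContentfulPaint(const nsCString& aURL) {
  PROFILER_MARKER_UNTYPED("FirstContentfulPaint", GRAPHICS);
  PROFILER_MARKER_TEXT("Navigation", DOM, {}, aURL);
}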
+ +#ifndef ProfilerMarkers_h +#define ProfilerMarkers_h + +#include "mozilla/BaseProfilerMarkers.h" +#include "mozilla/ProfilerMarkersDetail.h" +#include "mozilla/ProfilerLabels.h" +#include "nsJSUtils.h" // for nsJSUtils::GetCurrentlyRunningCodeInnerWindowID + +class nsIDocShell; + +namespace geckoprofiler::markers::detail { +// Please do not use anything from the detail namespace outside the profiler. + +#ifdef MOZ_GECKO_PROFILER +mozilla::Maybe profiler_get_inner_window_id_from_docshell( + nsIDocShell* aDocshell); +#else +inline mozilla::Maybe profiler_get_inner_window_id_from_docshell( + nsIDocShell* aDocshell) { + return mozilla::Nothing(); +} +#endif // MOZ_GECKO_PROFILER + +} // namespace geckoprofiler::markers::detail + +// This is a helper function to get the Inner Window ID from DocShell but it's +// not a recommended method to get it and it's not encouraged to use this +// function. If there is a computed inner window ID, `window`, or `Document` +// available in the call site, please use them. Use this function as a last +// resort. +inline mozilla::MarkerInnerWindowId MarkerInnerWindowIdFromDocShell( + nsIDocShell* aDocshell) { + mozilla::Maybe id = geckoprofiler::markers::detail:: + profiler_get_inner_window_id_from_docshell(aDocshell); + if (!id) { + return mozilla::MarkerInnerWindowId::NoId(); + } + return mozilla::MarkerInnerWindowId(*id); +} + +// This is a helper function to get the Inner Window ID from a JS Context but +// it's not a recommended method to get it and it's not encouraged to use this +// function. If there is a computed inner window ID, `window`, or `Document` +// available in the call site, please use them. Use this function as a last +// resort. +inline mozilla::MarkerInnerWindowId MarkerInnerWindowIdFromJSContext( + JSContext* aContext) { + return mozilla::MarkerInnerWindowId( + nsJSUtils::GetCurrentlyRunningCodeInnerWindowID(aContext)); +} + +// Bring category names from Base Profiler into the geckoprofiler::category +// namespace, for consistency with other Gecko Profiler identifiers. +namespace geckoprofiler::category { +using namespace ::mozilla::baseprofiler::category; +} + +#ifdef MOZ_GECKO_PROFILER +// Forward-declaration. TODO: Move to more common header, see bug 1681416. +bool profiler_capture_backtrace_into( + mozilla::ProfileChunkedBuffer& aChunkedBuffer, + mozilla::StackCaptureOptions aCaptureOptions); + +// Add a marker to a given buffer. `AddMarker()` and related macros should be +// used in most cases, see below for more information about them and the +// paramters; This function may be useful when markers need to be recorded in a +// local buffer outside of the main profiler buffer. +template +mozilla::ProfileBufferBlockIndex AddMarkerToBuffer( + mozilla::ProfileChunkedBuffer& aBuffer, + const mozilla::ProfilerString8View& aName, + const mozilla::MarkerCategory& aCategory, mozilla::MarkerOptions&& aOptions, + MarkerType aMarkerType, const PayloadArguments&... aPayloadArguments) { + AUTO_PROFILER_LABEL("AddMarkerToBuffer", PROFILER); + mozilla::Unused << aMarkerType; // Only the empty object type is useful. + return mozilla::base_profiler_markers_detail::AddMarkerToBuffer( + aBuffer, aName, aCategory, std::move(aOptions), + profiler_active_without_feature(ProfilerFeature::NoMarkerStacks) + ? ::profiler_capture_backtrace_into + : nullptr, + aPayloadArguments...); +} + +// Add a marker (without payload) to a given buffer. 
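// A sketch of feeding the docshell helper above into a marker's options,
// using PROFILER_MARKER_UNTYPED defined further below; NotifyLoadStart() and
// the marker name are hypothetical, and DOM is an arbitrary category choice.
void NotifyLoadStart(nsIDocShell* aDocShell) {
  PROFILER_MARKER_UNTYPED(
      "DocumentLoadStart", DOM,
      mozilla::MarkerOptions(MarkerInnerWindowIdFromDocShell(aDocShell)));
}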
+inline mozilla::ProfileBufferBlockIndex AddMarkerToBuffer( + mozilla::ProfileChunkedBuffer& aBuffer, + const mozilla::ProfilerString8View& aName, + const mozilla::MarkerCategory& aCategory, + mozilla::MarkerOptions&& aOptions = {}) { + return AddMarkerToBuffer(aBuffer, aName, aCategory, std::move(aOptions), + mozilla::baseprofiler::markers::NoPayload{}); +} +#endif + +[[nodiscard]] inline bool profiler_thread_is_being_profiled_for_markers() { + return profiler_thread_is_being_profiled(ThreadProfilingFeatures::Markers); +} + +[[nodiscard]] inline bool profiler_thread_is_being_profiled_for_markers( + const ProfilerThreadId& aThreadId) { + return profiler_thread_is_being_profiled(aThreadId, + ThreadProfilingFeatures::Markers); +} + +// Add a marker to the Gecko Profiler buffer. +// - aName: Main name of this marker. +// - aCategory: Category for this marker. +// - aOptions: Optional settings (such as timing, inner window id, +// backtrace...), see `MarkerOptions` for details. +// - aMarkerType: Empty object that specifies the type of marker. +// - aPayloadArguments: Arguments expected by this marker type's +// ` StreamJSONMarkerData` function. +template +mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View& aName, + const mozilla::MarkerCategory& aCategory, mozilla::MarkerOptions&& aOptions, + MarkerType aMarkerType, const PayloadArguments&... aPayloadArguments) { +#ifndef MOZ_GECKO_PROFILER + return {}; +#else + if (!profiler_thread_is_being_profiled_for_markers( + aOptions.ThreadId().ThreadId())) { + return {}; + } + AUTO_PROFILER_LABEL("profiler_add_marker", PROFILER); + return ::AddMarkerToBuffer(profiler_get_core_buffer(), aName, aCategory, + std::move(aOptions), aMarkerType, + aPayloadArguments...); +#endif +} + +// Add a marker (without payload) to the Gecko Profiler buffer. +inline mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View& aName, + const mozilla::MarkerCategory& aCategory, + mozilla::MarkerOptions&& aOptions = {}) { + return profiler_add_marker(aName, aCategory, std::move(aOptions), + mozilla::baseprofiler::markers::NoPayload{}); +} + +// Same as `profiler_add_marker()` (without payload). This macro is safe to use +// even if MOZ_GECKO_PROFILER is not #defined. +#define PROFILER_MARKER_UNTYPED(markerName, categoryName, ...) \ + do { \ + AUTO_PROFILER_STATS(PROFILER_MARKER_UNTYPED); \ + ::profiler_add_marker(markerName, ::geckoprofiler::category::categoryName, \ + ##__VA_ARGS__); \ + } while (false) + +// Same as `profiler_add_marker()` (with payload). This macro is safe to use +// even if MOZ_GECKO_PROFILER is not #defined. +#define PROFILER_MARKER(markerName, categoryName, options, MarkerType, ...) \ + do { \ + AUTO_PROFILER_STATS(PROFILER_MARKER_with_##MarkerType); \ + ::profiler_add_marker(markerName, ::geckoprofiler::category::categoryName, \ + options, ::geckoprofiler::markers::MarkerType{}, \ + ##__VA_ARGS__); \ + } while (false) + +namespace geckoprofiler::markers { +// Most common marker types. Others are in ProfilerMarkerTypes.h. +using TextMarker = ::mozilla::baseprofiler::markers::TextMarker; +using Tracing = mozilla::baseprofiler::markers::Tracing; +} // namespace geckoprofiler::markers + +// Add a text marker. This macro is safe to use even if MOZ_GECKO_PROFILER is +// not #defined. 
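// A sketch of a locally defined marker type recorded through PROFILER_MARKER
// above. The CacheHitMarker type, its field and EmitCacheHitMarker() are
// hypothetical; the MarkerTypeName / StreamJSONMarkerData / MarkerTypeDisplay
// interface follows the pattern of the common types in ProfilerMarkerTypes.h.
namespace geckoprofiler::markers {
struct CacheHitMarker {
  static constexpr mozilla::Span<const char> MarkerTypeName() {
    return mozilla::MakeStringSpan("CacheHit");
  }
  static void StreamJSONMarkerData(
      mozilla::baseprofiler::SpliceableJSONWriter& aWriter, int64_t aBytes) {
    aWriter.IntProperty("bytes", aBytes);
  }
  static mozilla::MarkerSchema MarkerTypeDisplay() {
    using MS = mozilla::MarkerSchema;
    MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
    schema.AddKeyLabelFormat("bytes", "Bytes", MS::Format::Integer);
    return schema;
  }
};
}  // namespace geckoprofiler::markers

void EmitCacheHitMarker(int64_t aBytes) {
  PROFILER_MARKER("CacheHit", OTHER, {}, CacheHitMarker, aBytes);
}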
+#define PROFILER_MARKER_TEXT(markerName, categoryName, options, text) \ + do { \ + AUTO_PROFILER_STATS(PROFILER_MARKER_TEXT); \ + ::profiler_add_marker(markerName, ::geckoprofiler::category::categoryName, \ + options, ::geckoprofiler::markers::TextMarker{}, \ + text); \ + } while (false) + +// RAII object that adds a PROFILER_MARKER_TEXT when destroyed; the marker's +// timing will be the interval from construction (unless an instant or start +// time is already specified in the provided options) until destruction. +class MOZ_RAII AutoProfilerTextMarker { + public: + AutoProfilerTextMarker(const char* aMarkerName, + const mozilla::MarkerCategory& aCategory, + mozilla::MarkerOptions&& aOptions, + const nsACString& aText) + : mMarkerName(aMarkerName), + mCategory(aCategory), + mOptions(std::move(aOptions)), + mText(aText) { + MOZ_ASSERT(mOptions.Timing().EndTime().IsNull(), + "AutoProfilerTextMarker options shouldn't have an end time"); + if (profiler_is_active_and_unpaused() && + mOptions.Timing().StartTime().IsNull()) { + mOptions.Set(mozilla::MarkerTiming::InstantNow()); + } + } + + ~AutoProfilerTextMarker() { + if (profiler_is_active_and_unpaused()) { + AUTO_PROFILER_LABEL("TextMarker", PROFILER); + mOptions.TimingRef().SetIntervalEnd(); + AUTO_PROFILER_STATS(AUTO_PROFILER_MARKER_TEXT); + profiler_add_marker( + mozilla::ProfilerString8View::WrapNullTerminatedString(mMarkerName), + mCategory, std::move(mOptions), geckoprofiler::markers::TextMarker{}, + mText); + } + } + + protected: + const char* mMarkerName; + mozilla::MarkerCategory mCategory; + mozilla::MarkerOptions mOptions; + nsCString mText; +}; + +// Creates an AutoProfilerTextMarker RAII object. This macro is safe to use +// even if MOZ_GECKO_PROFILER is not #defined. +#define AUTO_PROFILER_MARKER_TEXT(markerName, categoryName, options, text) \ + AutoProfilerTextMarker PROFILER_RAII( \ + markerName, ::mozilla::baseprofiler::category::categoryName, options, \ + text) + +class MOZ_RAII AutoProfilerTracing { + public: + AutoProfilerTracing(const char* aCategoryString, const char* aMarkerName, + mozilla::MarkerCategory aCategoryPair, + const mozilla::Maybe& aInnerWindowID) + : mCategoryString(aCategoryString), + mMarkerName(aMarkerName), + mCategoryPair(aCategoryPair), + mInnerWindowID(aInnerWindowID) { + profiler_add_marker( + mozilla::ProfilerString8View::WrapNullTerminatedString(mMarkerName), + mCategoryPair, + {mozilla::MarkerTiming::IntervalStart(), + mozilla::MarkerInnerWindowId(mInnerWindowID)}, + geckoprofiler::markers::Tracing{}, + mozilla::ProfilerString8View::WrapNullTerminatedString( + mCategoryString)); + } + + AutoProfilerTracing( + const char* aCategoryString, const char* aMarkerName, + mozilla::MarkerCategory aCategoryPair, + mozilla::UniquePtr aBacktrace, + const mozilla::Maybe& aInnerWindowID) + : mCategoryString(aCategoryString), + mMarkerName(aMarkerName), + mCategoryPair(aCategoryPair), + mInnerWindowID(aInnerWindowID) { + profiler_add_marker( + mozilla::ProfilerString8View::WrapNullTerminatedString(mMarkerName), + mCategoryPair, + {mozilla::MarkerTiming::IntervalStart(), + mozilla::MarkerInnerWindowId(mInnerWindowID), + mozilla::MarkerStack::TakeBacktrace(std::move(aBacktrace))}, + geckoprofiler::markers::Tracing{}, + mozilla::ProfilerString8View::WrapNullTerminatedString( + mCategoryString)); + } + + ~AutoProfilerTracing() { + profiler_add_marker( + mozilla::ProfilerString8View::WrapNullTerminatedString(mMarkerName), + mCategoryPair, + {mozilla::MarkerTiming::IntervalEnd(), + 
mozilla::MarkerInnerWindowId(mInnerWindowID)}, + geckoprofiler::markers::Tracing{}, + mozilla::ProfilerString8View::WrapNullTerminatedString( + mCategoryString)); + } + + protected: + const char* mCategoryString; + const char* mMarkerName; + const mozilla::MarkerCategory mCategoryPair; + const mozilla::Maybe mInnerWindowID; +}; + +// Adds a START/END pair of tracing markers. +#define AUTO_PROFILER_TRACING_MARKER(categoryString, markerName, categoryPair) \ + AutoProfilerTracing PROFILER_RAII(categoryString, markerName, \ + geckoprofiler::category::categoryPair, \ + mozilla::Nothing()) +#define AUTO_PROFILER_TRACING_MARKER_INNERWINDOWID( \ + categoryString, markerName, categoryPair, innerWindowId) \ + AutoProfilerTracing PROFILER_RAII(categoryString, markerName, \ + geckoprofiler::category::categoryPair, \ + mozilla::Some(innerWindowId)) +#define AUTO_PROFILER_TRACING_MARKER_DOCSHELL(categoryString, markerName, \ + categoryPair, docShell) \ + AutoProfilerTracing PROFILER_RAII( \ + categoryString, markerName, geckoprofiler::category::categoryPair, \ + geckoprofiler::markers::detail:: \ + profiler_get_inner_window_id_from_docshell(docShell)) + +#ifdef MOZ_GECKO_PROFILER +extern template mozilla::ProfileBufferBlockIndex AddMarkerToBuffer( + mozilla::ProfileChunkedBuffer&, const mozilla::ProfilerString8View&, + const mozilla::MarkerCategory&, mozilla::MarkerOptions&&, + mozilla::baseprofiler::markers::NoPayload); + +extern template mozilla::ProfileBufferBlockIndex AddMarkerToBuffer( + mozilla::ProfileChunkedBuffer&, const mozilla::ProfilerString8View&, + const mozilla::MarkerCategory&, mozilla::MarkerOptions&&, + mozilla::baseprofiler::markers::TextMarker, const std::string&); + +extern template mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View&, const mozilla::MarkerCategory&, + mozilla::MarkerOptions&&, mozilla::baseprofiler::markers::TextMarker, + const std::string&); + +extern template mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View&, const mozilla::MarkerCategory&, + mozilla::MarkerOptions&&, mozilla::baseprofiler::markers::TextMarker, + const nsCString&); + +extern template mozilla::ProfileBufferBlockIndex profiler_add_marker( + const mozilla::ProfilerString8View&, const mozilla::MarkerCategory&, + mozilla::MarkerOptions&&, mozilla::baseprofiler::markers::Tracing, + const mozilla::ProfilerString8View&); +#endif // MOZ_GECKO_PROFILER + +#endif // ProfilerMarkers_h diff --git a/tools/profiler/public/ProfilerMarkersDetail.h b/tools/profiler/public/ProfilerMarkersDetail.h new file mode 100644 index 0000000000..2308a14bb2 --- /dev/null +++ b/tools/profiler/public/ProfilerMarkersDetail.h @@ -0,0 +1,31 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerMarkersDetail_h +#define ProfilerMarkersDetail_h + +#ifndef ProfilerMarkers_h +# error "This header should only be #included by ProfilerMarkers.h" +#endif + +#include "mozilla/ProfilerMarkersPrerequisites.h" + +#ifdef MOZ_GECKO_PROFILER + +// ~~ HERE BE DRAGONS ~~ +// +// Everything below is internal implementation detail, you shouldn't need to +// look at it unless working on the profiler code. + +// Header that specializes the (de)serializers for xpcom types. 
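// A sketch pairing the two RAII interval helpers defined earlier in
// ProfilerMarkers.h; RestyleSubtree(), the marker names and the LAYOUT
// category are hypothetical choices.
void RestyleSubtree(const nsCString& aSelector) {
  AUTO_PROFILER_MARKER_TEXT("Restyle", LAYOUT, {}, aSelector);
  AUTO_PROFILER_TRACING_MARKER("Style", "RestyleSubtree", LAYOUT);
  // ... restyle work; both markers cover this interval ...
}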
+# include "mozilla/ProfileBufferEntrySerializationGeckoExtensions.h" + +// Implemented in platform.cpp +mozilla::ProfileChunkedBuffer& profiler_get_core_buffer(); + +#endif // MOZ_GECKO_PROFILER + +#endif // ProfilerMarkersDetail_h diff --git a/tools/profiler/public/ProfilerMarkersPrerequisites.h b/tools/profiler/public/ProfilerMarkersPrerequisites.h new file mode 100644 index 0000000000..0f10f7efe2 --- /dev/null +++ b/tools/profiler/public/ProfilerMarkersPrerequisites.h @@ -0,0 +1,31 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This header contains basic definitions required to create marker types, and +// to add markers to the profiler buffers. +// +// In most cases, #include "mozilla/ProfilerMarkers.h" instead, or +// #include "mozilla/ProfilerMarkerTypes.h" for common marker types. + +#ifndef ProfilerMarkersPrerequisites_h +#define ProfilerMarkersPrerequisites_h + +#include "mozilla/BaseProfilerMarkersPrerequisites.h" +#include "mozilla/ProfilerThreadState.h" + +#ifdef MOZ_GECKO_PROFILER + +namespace geckoprofiler::markers { + +// Default marker payload types, with no extra information, not even a marker +// type and payload. This is intended for label-only markers. +using NoPayload = ::mozilla::baseprofiler::markers::NoPayload; + +} // namespace geckoprofiler::markers + +#endif // MOZ_GECKO_PROFILER + +#endif // ProfilerMarkersPrerequisites_h diff --git a/tools/profiler/public/ProfilerParent.h b/tools/profiler/public/ProfilerParent.h new file mode 100644 index 0000000000..8bd5c71721 --- /dev/null +++ b/tools/profiler/public/ProfilerParent.h @@ -0,0 +1,119 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerParent_h +#define ProfilerParent_h + +#include "mozilla/PProfilerParent.h" +#include "mozilla/RefPtr.h" + +class nsIProfilerStartParams; + +namespace mozilla { + +class ProfileBufferGlobalController; +class ProfilerParentTracker; + +// This is the main process side of the PProfiler protocol. +// ProfilerParent instances only exist on the main thread of the main process. +// The other side (ProfilerChild) lives on a background thread in the other +// process. +// The creation of PProfiler actors is initiated from the main process, after +// the other process has been launched. +// ProfilerParent instances are destroyed once the message channel closes, +// which can be triggered by either process, depending on which one shuts down +// first. +// All ProfilerParent instances are registered with a manager class called +// ProfilerParentTracker, which has the list of living ProfilerParent instances +// and handles shutdown. 
+class ProfilerParent final : public PProfilerParent { + public: + NS_INLINE_DECL_REFCOUNTING(ProfilerParent, final) + + static mozilla::ipc::Endpoint CreateForProcess( + base::ProcessId aOtherPid); + +#ifdef MOZ_GECKO_PROFILER + using SingleProcessProfilePromise = + MozPromise; + + struct SingleProcessProfilePromiseAndChildPid { + RefPtr profilePromise; + base::ProcessId childPid; + }; + + using SingleProcessProgressPromise = + MozPromise; + + // The following static methods can be called on any thread, but they are + // no-ops on anything other than the main thread. + // If called on the main thread, the call will be broadcast to all + // registered processes (all processes for which we have a ProfilerParent + // object). + // At the moment, the main process always calls these methods on the main + // thread, and that's the only process in which we need to forward these + // calls to other processes. The other processes will call these methods on + // the ProfilerChild background thread, but those processes don't need to + // forward these calls any further. + + // Returns the profiles to expect, as promises and child pids. + static nsTArray GatherProfiles(); + + // Send a request to get the GatherProfiles() progress update from one child + // process, returns a promise to be resolved with that progress. + // The promise RefPtr may be null if the child process is unknown. + // Progress may be invalid, if the request arrived after the child process + // had already responded to the main GatherProfile() IPC, or something went + // very wrong in that process. + static RefPtr RequestGatherProfileProgress( + base::ProcessId aChildPid); + + // This will start the profiler in all child processes. The returned promise + // will be resolved when all child have completed their operation + // (successfully or not.) + [[nodiscard]] static RefPtr ProfilerStarted( + nsIProfilerStartParams* aParams); + static void ProfilerWillStopIfStarted(); + [[nodiscard]] static RefPtr ProfilerStopped(); + [[nodiscard]] static RefPtr ProfilerPaused(); + [[nodiscard]] static RefPtr ProfilerResumed(); + [[nodiscard]] static RefPtr ProfilerPausedSampling(); + [[nodiscard]] static RefPtr ProfilerResumedSampling(); + static void ClearAllPages(); + + [[nodiscard]] static RefPtr WaitOnePeriodicSampling(); + + // Create a "Final" update that the Child can return to its Parent. + static ProfileBufferChunkManagerUpdate MakeFinalUpdate(); + + // True if the ProfilerParent holds a lock on this thread. + static bool IsLockedOnCurrentThread(); + + private: + friend class ProfileBufferGlobalController; + friend class ProfilerParentTracker; + + explicit ProfilerParent(base::ProcessId aChildPid); + + void Init(); + void ActorDestroy(ActorDestroyReason aActorDestroyReason) override; + + void RequestChunkManagerUpdate(); + + base::ProcessId mChildPid; + nsTArray> + mPendingRequestedProfiles; + bool mDestroyed; +#endif // MOZ_GECKO_PROFILER + + private: + virtual ~ProfilerParent(); +}; + +} // namespace mozilla + +#endif // ProfilerParent_h diff --git a/tools/profiler/public/ProfilerRunnable.h b/tools/profiler/public/ProfilerRunnable.h new file mode 100644 index 0000000000..b3b4e64043 --- /dev/null +++ b/tools/profiler/public/ProfilerRunnable.h @@ -0,0 +1,68 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerRunnable_h +#define ProfilerRunnable_h + +#include "GeckoProfiler.h" +#include "nsIThreadPool.h" + +#if !defined(MOZ_GECKO_PROFILER) || !defined(MOZ_COLLECTING_RUNNABLE_TELEMETRY) +# define AUTO_PROFILE_FOLLOWING_RUNNABLE(runnable) +#else +# define AUTO_PROFILE_FOLLOWING_RUNNABLE(runnable) \ + mozilla::Maybe raiiRunnableMarker; \ + if (profiler_thread_is_being_profiled_for_markers()) { \ + raiiRunnableMarker.emplace(runnable); \ + } + +namespace mozilla { + +class MOZ_RAII AutoProfileRunnable { + public: + explicit AutoProfileRunnable(Runnable* aRunnable) + : mStartTime(TimeStamp::Now()) { + aRunnable->GetName(mName); + } + explicit AutoProfileRunnable(nsIRunnable* aRunnable) + : mStartTime(TimeStamp::Now()) { + nsCOMPtr threadPool = do_QueryInterface(aRunnable); + if (threadPool) { + // nsThreadPool::Run has its own call to AUTO_PROFILE_FOLLOWING_RUNNABLE, + // avoid nesting runnable markers. + return; + } + + nsCOMPtr named = do_QueryInterface(aRunnable); + if (named) { + named->GetName(mName); + } + } + explicit AutoProfileRunnable(nsACString& aName) + : mStartTime(TimeStamp::Now()), mName(aName) {} + + ~AutoProfileRunnable() { + if (mName.IsEmpty()) { + return; + } + + AUTO_PROFILER_LABEL("AutoProfileRunnable", PROFILER); + AUTO_PROFILER_STATS(AUTO_PROFILE_RUNNABLE); + profiler_add_marker("Runnable", ::mozilla::baseprofiler::category::OTHER, + MarkerTiming::IntervalUntilNowFrom(mStartTime), + geckoprofiler::markers::TextMarker{}, mName); + } + + protected: + TimeStamp mStartTime; + nsAutoCString mName; +}; + +} // namespace mozilla + +#endif + +#endif // ProfilerRunnable_h diff --git a/tools/profiler/public/ProfilerRustBindings.h b/tools/profiler/public/ProfilerRustBindings.h new file mode 100644 index 0000000000..bf290838a1 --- /dev/null +++ b/tools/profiler/public/ProfilerRustBindings.h @@ -0,0 +1,12 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ProfilerRustBindings_h +#define ProfilerRustBindings_h + +#include "mozilla/profiler_ffi_generated.h" + +// Add any non-generated support code here + +#endif // ProfilerRustBindings_h diff --git a/tools/profiler/public/ProfilerState.h b/tools/profiler/public/ProfilerState.h new file mode 100644 index 0000000000..7a9f3f5c73 --- /dev/null +++ b/tools/profiler/public/ProfilerState.h @@ -0,0 +1,399 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This header contains most functions that give information about the Profiler: +// Whether it is active or not, paused, and the selected features. +// It is safe to include unconditionally, but uses of structs and functions must +// be guarded by `#ifdef MOZ_GECKO_PROFILER`. 
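// A sketch of the runnable-profiling macro from ProfilerRunnable.h above,
// assuming an event-loop-like function; RunOneEvent() is hypothetical.
void RunOneEvent(nsIRunnable* aEvent) {
  AUTO_PROFILE_FOLLOWING_RUNNABLE(aEvent);
  aEvent->Run();
}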
+ +#ifndef ProfilerState_h +#define ProfilerState_h + +#include +#include +#include "mozilla/ProfilerUtils.h" + +#include + +//--------------------------------------------------------------------------- +// Profiler features +//--------------------------------------------------------------------------- + +#if defined(__APPLE__) && defined(__aarch64__) +# define POWER_HELP "Sample per process power use" +#elif defined(__APPLE__) && defined(__x86_64__) +# define POWER_HELP \ + "Record the power used by the entire system with each sample." +#elif defined(__linux__) && defined(__x86_64__) +# define POWER_HELP \ + "Record the power used by the entire system with each sample. " \ + "Only available with Intel CPUs and requires setting " \ + "the sysctl kernel.perf_event_paranoid to 0." + +#elif defined(_MSC_VER) +# define POWER_HELP \ + "Record the value of every energy meter available on the system with " \ + "each sample. Only available on Windows 11 with Intel CPUs." +#else +# define POWER_HELP "Not supported on this platform." +#endif + +// Higher-order macro containing all the feature info in one place. Define +// |MACRO| appropriately to extract the relevant parts. Note that the number +// values are used internally only and so can be changed without consequence. +// Any changes to this list should also be applied to the feature list in +// toolkit/components/extensions/schemas/geckoProfiler.json. +// *** Synchronize with lists in BaseProfilerState.h and geckoProfiler.json *** +#define PROFILER_FOR_EACH_FEATURE(MACRO) \ + MACRO(0, "java", Java, "Profile Java code, Android only") \ + \ + MACRO(1, "js", JS, \ + "Get the JS engine to expose the JS stack to the profiler") \ + \ + MACRO(2, "mainthreadio", MainThreadIO, "Add main thread file I/O") \ + \ + MACRO(3, "fileio", FileIO, \ + "Add file I/O from all profiled threads, implies mainthreadio") \ + \ + MACRO(4, "fileioall", FileIOAll, \ + "Add file I/O from all threads, implies fileio") \ + \ + MACRO(5, "nomarkerstacks", NoMarkerStacks, \ + "Markers do not capture stacks, to reduce overhead") \ + \ + MACRO(6, "screenshots", Screenshots, \ + "Take a snapshot of the window on every composition") \ + \ + MACRO(7, "seqstyle", SequentialStyle, \ + "Disable parallel traversal in styling") \ + \ + MACRO(8, "stackwalk", StackWalk, \ + "Walk the C++ stack, not available on all platforms") \ + \ + MACRO(9, "jsallocations", JSAllocations, \ + "Have the JavaScript engine track allocations") \ + \ + MACRO(10, "nostacksampling", NoStackSampling, \ + "Disable all stack sampling: Cancels \"js\", \"stackwalk\" and " \ + "labels") \ + \ + MACRO(11, "nativeallocations", NativeAllocations, \ + "Collect the stacks from a smaller subset of all native " \ + "allocations, biasing towards collecting larger allocations") \ + \ + MACRO(12, "ipcmessages", IPCMessages, \ + "Have the IPC layer track cross-process messages") \ + \ + MACRO(13, "audiocallbacktracing", AudioCallbackTracing, \ + "Audio callback tracing") \ + \ + MACRO(14, "cpu", CPUUtilization, "CPU utilization") \ + \ + MACRO(15, "notimerresolutionchange", NoTimerResolutionChange, \ + "Do not adjust the timer resolution for sampling, so that other " \ + "Firefox timers do not get affected") \ + \ + MACRO(16, "cpuallthreads", CPUAllThreads, \ + "Sample the CPU utilization of all registered threads") \ + \ + MACRO(17, "samplingallthreads", SamplingAllThreads, \ + "Sample the stacks of all registered threads") \ + \ + MACRO(18, "markersallthreads", MarkersAllThreads, \ + "Record markers from all registered 
threads") \ + \ + MACRO(19, "unregisteredthreads", UnregisteredThreads, \ + "Discover and profile unregistered threads -- beware: expensive!") \ + \ + MACRO(20, "processcpu", ProcessCPU, \ + "Sample the CPU utilization of each process") \ + \ + MACRO(21, "power", Power, POWER_HELP) +// *** Synchronize with lists in BaseProfilerState.h and geckoProfiler.json *** + +struct ProfilerFeature { +#define DECLARE(n_, str_, Name_, desc_) \ + static constexpr uint32_t Name_ = (1u << n_); \ + [[nodiscard]] static constexpr bool Has##Name_(uint32_t aFeatures) { \ + return aFeatures & Name_; \ + } \ + static constexpr void Set##Name_(uint32_t& aFeatures) { \ + aFeatures |= Name_; \ + } \ + static constexpr void Clear##Name_(uint32_t& aFeatures) { \ + aFeatures &= ~Name_; \ + } + + // Define a bitfield constant, a getter, and two setters for each feature. + PROFILER_FOR_EACH_FEATURE(DECLARE) + +#undef DECLARE +}; + +// clang-format off +MOZ_DEFINE_ENUM_CLASS(ProfilingState,( + // A callback will be invoked ... + AlreadyActive, // if the profiler is active when the callback is added. + RemovingCallback, // when the callback is removed. + Started, // after the profiler has started. + Pausing, // before the profiler is paused. + Resumed, // after the profiler has resumed. + GeneratingProfile, // before a profile is created. + Stopping, // before the profiler stops (unless restarting afterward). + ShuttingDown // before the profiler is shut down. +)); +// clang-format on + +[[nodiscard]] inline static const char* ProfilingStateToString( + ProfilingState aProfilingState) { + switch (aProfilingState) { + case ProfilingState::AlreadyActive: + return "Profiler already active"; + case ProfilingState::RemovingCallback: + return "Callback being removed"; + case ProfilingState::Started: + return "Profiler started"; + case ProfilingState::Pausing: + return "Profiler pausing"; + case ProfilingState::Resumed: + return "Profiler resumed"; + case ProfilingState::GeneratingProfile: + return "Generating profile"; + case ProfilingState::Stopping: + return "Profiler stopping"; + case ProfilingState::ShuttingDown: + return "Profiler shutting down"; + default: + MOZ_ASSERT_UNREACHABLE("Unexpected ProfilingState enum value"); + return "?"; + } +} + +using ProfilingStateSet = mozilla::EnumSet; + +[[nodiscard]] constexpr ProfilingStateSet AllProfilingStates() { + ProfilingStateSet set; + using Value = std::underlying_type_t; + for (Value stateValue = 0; + stateValue <= static_cast(kHighestProfilingState); ++stateValue) { + set += static_cast(stateValue); + } + return set; +} + +// Type of callbacks to be invoked at certain state changes. +// It must NOT call profiler_add/remove_state_change_callback(). 
+using ProfilingStateChangeCallback = std::function; + +#ifndef MOZ_GECKO_PROFILER + +[[nodiscard]] inline bool profiler_is_active() { return false; } +[[nodiscard]] inline bool profiler_is_active_and_unpaused() { return false; } +[[nodiscard]] inline bool profiler_feature_active(uint32_t aFeature) { + return false; +} +[[nodiscard]] inline bool profiler_is_locked_on_current_thread() { + return false; +} +inline void profiler_add_state_change_callback( + ProfilingStateSet aProfilingStateSet, + ProfilingStateChangeCallback&& aCallback, uintptr_t aUniqueIdentifier = 0) { +} +inline void profiler_remove_state_change_callback(uintptr_t aUniqueIdentifier) { +} + +#else // !MOZ_GECKO_PROFILER + +# include "mozilla/Atomics.h" +# include "mozilla/Maybe.h" + +# include + +namespace mozilla::profiler::detail { + +// RacyFeatures is only defined in this header file so that its methods can +// be inlined into profiler_is_active(). Please do not use anything from the +// detail namespace outside the profiler. + +// Within the profiler's code, the preferred way to check profiler activeness +// and features is via ActivePS(). However, that requires locking gPSMutex. +// There are some hot operations where absolute precision isn't required, so we +// duplicate the activeness/feature state in a lock-free manner in this class. +class RacyFeatures { + public: + static void SetActive(uint32_t aFeatures) { + sActiveAndFeatures = Active | aFeatures; + } + + static void SetInactive() { sActiveAndFeatures = 0; } + + static void SetPaused() { sActiveAndFeatures |= Paused; } + + static void SetUnpaused() { sActiveAndFeatures &= ~Paused; } + + static void SetSamplingPaused() { sActiveAndFeatures |= SamplingPaused; } + + static void SetSamplingUnpaused() { sActiveAndFeatures &= ~SamplingPaused; } + + [[nodiscard]] static mozilla::Maybe FeaturesIfActive() { + if (uint32_t af = sActiveAndFeatures; af & Active) { + // Active, remove the Active&Paused bits to get all features. + return Some(af & ~(Active | Paused | SamplingPaused)); + } + return Nothing(); + } + + [[nodiscard]] static mozilla::Maybe FeaturesIfActiveAndUnpaused() { + if (uint32_t af = sActiveAndFeatures; (af & (Active | Paused)) == Active) { + // Active but not fully paused, remove the Active and sampling-paused bits + // to get all features. + return Some(af & ~(Active | SamplingPaused)); + } + return Nothing(); + } + + // This implementation must be kept in sync with `gecko_profiler::is_active` + // in the Profiler Rust API. + [[nodiscard]] static bool IsActive() { + return uint32_t(sActiveAndFeatures) & Active; + } + + [[nodiscard]] static bool IsActiveWithFeature(uint32_t aFeature) { + uint32_t af = sActiveAndFeatures; // copy it first + return (af & Active) && (af & aFeature); + } + + [[nodiscard]] static bool IsActiveWithoutFeature(uint32_t aFeature) { + uint32_t af = sActiveAndFeatures; // copy it first + return (af & Active) && !(af & aFeature); + } + + // True if profiler is active, and not fully paused. + // Note that periodic sampling *could* be paused! + // This implementation must be kept in sync with + // `gecko_profiler::can_accept_markers` in the Profiler Rust API. + [[nodiscard]] static bool IsActiveAndUnpaused() { + uint32_t af = sActiveAndFeatures; // copy it first + return (af & Active) && !(af & Paused); + } + + // True if profiler is active, and sampling is not paused (though generic + // `SetPaused()` or specific `SetSamplingPaused()`). 
+ [[nodiscard]] static bool IsActiveAndSamplingUnpaused() { + uint32_t af = sActiveAndFeatures; // copy it first + return (af & Active) && !(af & (Paused | SamplingPaused)); + } + + private: + static constexpr uint32_t Active = 1u << 31; + static constexpr uint32_t Paused = 1u << 30; + static constexpr uint32_t SamplingPaused = 1u << 29; + +// Ensure Active/Paused don't overlap with any of the feature bits. +# define NO_OVERLAP(n_, str_, Name_, desc_) \ + static_assert(ProfilerFeature::Name_ != SamplingPaused, \ + "bad feature value"); + + PROFILER_FOR_EACH_FEATURE(NO_OVERLAP); + +# undef NO_OVERLAP + + // We combine the active bit with the feature bits so they can be read or + // written in a single atomic operation. Accesses to this atomic are not + // recorded by web replay as they may occur at non-deterministic points. + static mozilla::Atomic + sActiveAndFeatures; +}; + +} // namespace mozilla::profiler::detail + +//--------------------------------------------------------------------------- +// Get information from the profiler +//--------------------------------------------------------------------------- + +// Is the profiler active? Note: the return value of this function can become +// immediately out-of-date. E.g. the profile might be active but then +// profiler_stop() is called immediately afterward. One common and reasonable +// pattern of usage is the following: +// +// if (profiler_is_active()) { +// ExpensiveData expensiveData = CreateExpensiveData(); +// PROFILER_OPERATION(expensiveData); +// } +// +// where PROFILER_OPERATION is a no-op if the profiler is inactive. In this +// case the profiler_is_active() check is just an optimization -- it prevents +// us calling CreateExpensiveData() unnecessarily in most cases, but the +// expensive data will end up being created but not used if another thread +// stops the profiler between the CreateExpensiveData() and PROFILER_OPERATION +// calls. +[[nodiscard]] inline bool profiler_is_active() { + return mozilla::profiler::detail::RacyFeatures::IsActive(); +} + +// Same as profiler_is_active(), but also checks if the profiler is not paused. +[[nodiscard]] inline bool profiler_is_active_and_unpaused() { + return mozilla::profiler::detail::RacyFeatures::IsActiveAndUnpaused(); +} + +// Is the profiler active and paused? Returns false if the profiler is inactive. +[[nodiscard]] bool profiler_is_paused(); + +// Is the profiler active and sampling is paused? Returns false if the profiler +// is inactive. +[[nodiscard]] bool profiler_is_sampling_paused(); + +// Get all the features supported by the profiler that are accepted by +// profiler_start(). The result is the same whether the profiler is active or +// not. +[[nodiscard]] uint32_t profiler_get_available_features(); + +// Returns the full feature set if the profiler is active. +// Note: the return value can become immediately out-of-date, much like the +// return value of profiler_is_active(). +[[nodiscard]] inline mozilla::Maybe profiler_features_if_active() { + return mozilla::profiler::detail::RacyFeatures::FeaturesIfActive(); +} + +// Returns the full feature set if the profiler is active and unpaused. +// Note: the return value can become immediately out-of-date, much like the +// return value of profiler_is_active(). +[[nodiscard]] inline mozilla::Maybe +profiler_features_if_active_and_unpaused() { + return mozilla::profiler::detail::RacyFeatures::FeaturesIfActiveAndUnpaused(); +} + +// Check if a profiler feature (specified via the ProfilerFeature type) is +// active. 
Returns false if the profiler is inactive. Note: the return value +// can become immediately out-of-date, much like the return value of +// profiler_is_active(). +[[nodiscard]] bool profiler_feature_active(uint32_t aFeature); + +// Check if the profiler is active without a feature (specified via the +// ProfilerFeature type). Note: the return value can become immediately +// out-of-date, much like the return value of profiler_is_active(). +[[nodiscard]] bool profiler_active_without_feature(uint32_t aFeature); + +// Returns true if any of the profiler mutexes are currently locked *on the +// current thread*. This may be used by re-entrant code that may call profiler +// functions while the same of a different profiler mutex is locked, which could +// deadlock. +[[nodiscard]] bool profiler_is_locked_on_current_thread(); + +// Install a callback to be invoked at any of the given profiling state changes. +// An optional non-zero identifier may be given, to allow later removal of the +// callback, the caller is responsible for making sure it's really unique (e.g., +// by using a pointer to an object it owns.) +void profiler_add_state_change_callback( + ProfilingStateSet aProfilingStateSet, + ProfilingStateChangeCallback&& aCallback, uintptr_t aUniqueIdentifier = 0); + +// Remove the callback with the given non-zero identifier. +void profiler_remove_state_change_callback(uintptr_t aUniqueIdentifier); + +#endif // MOZ_GECKO_PROFILER + +#endif // ProfilerState_h diff --git a/tools/profiler/public/ProfilerThreadPlatformData.h b/tools/profiler/public/ProfilerThreadPlatformData.h new file mode 100644 index 0000000000..c243a8ee02 --- /dev/null +++ b/tools/profiler/public/ProfilerThreadPlatformData.h @@ -0,0 +1,80 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerThreadPlatformData_h +#define ProfilerThreadPlatformData_h + +#include "mozilla/ProfilerUtils.h" + +#if defined(__APPLE__) +# include +#elif defined(__linux__) || defined(__ANDROID__) || defined(__FreeBSD__) +# include "mozilla/Maybe.h" +# include +#endif + +namespace mozilla::profiler { + +class PlatformData { +#if (defined(_MSC_VER) || defined(__MINGW32__)) && defined(MOZ_GECKO_PROFILER) + public: + explicit PlatformData(ProfilerThreadId aThreadId); + ~PlatformData(); + + // Faking win32's HANDLE, because #including "windows.h" here causes trouble + // (e.g., it #defines `Yield` as nothing!) + // This type is static_check'ed against HANDLE in platform-win32.cpp. + using WindowsHandle = void*; + WindowsHandle ProfiledThread() const { return mProfiledThread; } + + private: + WindowsHandle mProfiledThread; +#elif defined(__APPLE__) && defined(MOZ_GECKO_PROFILER) + public: + explicit PlatformData(ProfilerThreadId aThreadId); + ~PlatformData(); + thread_act_t ProfiledThread() const { return mProfiledThread; } + + private: + // Note: for mProfiledThread Mach primitives are used instead of pthread's + // because the latter doesn't provide thread manipulation primitives + // required. For details, consult "Mac OS X Internals" book, Section 7.3. 
+ thread_act_t mProfiledThread; +#elif (defined(__linux__) || defined(__ANDROID__) || defined(__FreeBSD__)) && \ + defined(MOZ_GECKO_PROFILER) + public: + explicit PlatformData(ProfilerThreadId aThreadId); + ~PlatformData(); + // Clock Id for this profiled thread. `Nothing` if `pthread_getcpuclockid` + // failed (e.g., if the system doesn't support per-thread clocks). + Maybe GetClockId() const { return mClockId; } + + private: + Maybe mClockId; +#else + public: + explicit PlatformData(ProfilerThreadId aThreadId) {} +#endif +}; + +/** + * Return the number of nanoseconds of CPU time used since thread start. + * + * @return true on success. + */ +#if defined(MOZ_GECKO_PROFILER) +bool GetCpuTimeSinceThreadStartInNs(uint64_t* aResult, + const PlatformData& aPlatformData); +#else +static inline bool GetCpuTimeSinceThreadStartInNs( + uint64_t* aResult, const PlatformData& aPlatformData) { + return false; +} +#endif + +} // namespace mozilla::profiler + +#endif // ProfilerThreadPlatformData_h diff --git a/tools/profiler/public/ProfilerThreadRegistration.h b/tools/profiler/public/ProfilerThreadRegistration.h new file mode 100644 index 0000000000..3fb931987d --- /dev/null +++ b/tools/profiler/public/ProfilerThreadRegistration.h @@ -0,0 +1,367 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerThreadRegistration_h +#define ProfilerThreadRegistration_h + +#include "mozilla/BaseProfilerDetail.h" +#include "mozilla/ProfilerThreadRegistrationData.h" +#include "mozilla/ThreadLocal.h" + +namespace mozilla::profiler { + +class ThreadRegistry; + +// To use as RAII object, or through RegisterThread/UnregisterThread. +// Automatically registers itself with TLS and Profiler. +// It can be safely nested, but nested instances are just ignored. +// See Get.../With... functions for how to access the data. +class ThreadRegistration { + private: + using DataMutex = baseprofiler::detail::BaseProfilerMutex; + using DataLock = baseprofiler::detail::BaseProfilerAutoLock; + + public: + // Constructor to use as RAII auto-registration object. + // It stores itself in the TLS (its effective owner), and gives its pointer to + // the Profiler. + ThreadRegistration(const char* aName, const void* aStackTop); + + // Destruction reverses construction: Remove pointer from the Profiler (except + // for the main thread, because it should be done by the profiler itself) and + // from the TLS. + ~ThreadRegistration(); + + // Manual construction&destruction, if RAII is not possible or too expensive + // in stack space. + // RegisterThread() *must* be paired with exactly one UnregisterThread() on + // the same thread. (Extra UnregisterThread() calls are handled safely, but + // they may cause profiling of this thread to stop earlier than expected.) + static ProfilingStack* RegisterThread(const char* aName, + const void* aStackTop); + static void UnregisterThread(); + + [[nodiscard]] static bool IsRegistered() { return GetFromTLS(); } + + // Prevent copies&moves. + ThreadRegistration(const ThreadRegistration&) = delete; + ThreadRegistration& operator=(const ThreadRegistration&) = delete; + + // Aliases to data accessors (removing the ThreadRegistration prefix). 
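// A sketch of the RAII registration described above, for a manually created
// platform thread; ThreadMain() and the thread name are hypothetical.
void ThreadMain() {
  char stackTop = 0;
  mozilla::profiler::ThreadRegistration registration("HypotheticalWorker",
                                                     &stackTop);
  // ... the thread's work loop; it can be profiled while this is in scope ...
}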
+ + using UnlockedConstReader = ThreadRegistrationUnlockedConstReader; + using UnlockedConstReaderAndAtomicRW = + ThreadRegistrationUnlockedConstReaderAndAtomicRW; + using UnlockedRWForLockedProfiler = + ThreadRegistrationUnlockedRWForLockedProfiler; + using UnlockedReaderAndAtomicRWOnThread = + ThreadRegistrationUnlockedReaderAndAtomicRWOnThread; + using LockedRWFromAnyThread = ThreadRegistrationLockedRWFromAnyThread; + using LockedRWOnThread = ThreadRegistrationLockedRWOnThread; + + // On-thread access from the TLS, providing the following data accessors: + // UnlockedConstReader, UnlockedConstReaderAndAtomicRW, + // UnlockedRWForLockedProfiler, UnlockedReaderAndAtomicRWOnThread, and + // LockedRWOnThread. + // (See ThreadRegistry class for OFF-thread access.) + + // Reference-like class pointing at the ThreadRegistration for the current + // thread. + class OnThreadRef { + public: + // const UnlockedConstReader + + [[nodiscard]] const UnlockedConstReader& UnlockedConstReaderCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedConstReader(F&& aF) const { + return std::forward(aF)(UnlockedConstReaderCRef()); + } + + // const UnlockedConstReaderAndAtomicRW + + [[nodiscard]] const UnlockedConstReaderAndAtomicRW& + UnlockedConstReaderAndAtomicRWCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedConstReaderAndAtomicRW(F&& aF) const { + return std::forward(aF)(UnlockedConstReaderAndAtomicRWCRef()); + } + + // UnlockedConstReaderAndAtomicRW + + [[nodiscard]] UnlockedConstReaderAndAtomicRW& + UnlockedConstReaderAndAtomicRWRef() { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedConstReaderAndAtomicRW(F&& aF) { + return std::forward(aF)(UnlockedConstReaderAndAtomicRWRef()); + } + + // const UnlockedRWForLockedProfiler + + [[nodiscard]] const UnlockedRWForLockedProfiler& + UnlockedRWForLockedProfilerCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedRWForLockedProfiler(F&& aF) const { + return std::forward(aF)(UnlockedRWForLockedProfilerCRef()); + } + + // UnlockedRWForLockedProfiler + + [[nodiscard]] UnlockedRWForLockedProfiler& + UnlockedRWForLockedProfilerRef() { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedRWForLockedProfiler(F&& aF) { + return std::forward(aF)(UnlockedRWForLockedProfilerRef()); + } + + // const UnlockedReaderAndAtomicRWOnThread + + [[nodiscard]] const UnlockedReaderAndAtomicRWOnThread& + UnlockedReaderAndAtomicRWOnThreadCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedReaderAndAtomicRWOnThread(F&& aF) const { + return std::forward(aF)(UnlockedReaderAndAtomicRWOnThreadCRef()); + } + + // UnlockedReaderAndAtomicRWOnThread + + [[nodiscard]] UnlockedReaderAndAtomicRWOnThread& + UnlockedReaderAndAtomicRWOnThreadRef() { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedReaderAndAtomicRWOnThread(F&& aF) { + return std::forward(aF)(UnlockedReaderAndAtomicRWOnThreadRef()); + } + + // const LockedRWOnThread through ConstRWOnThreadWithLock + + // Locking order: Profiler, ThreadRegistry, ThreadRegistration. 
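    // An illustrative use of the unlocked With... accessors above
    // (hypothetical caller code; `aData` is just a lambda parameter name
    // chosen here):
    //
    //   ThreadRegistration::WithOnThreadRef(
    //       [](ThreadRegistration::OnThreadRef aOnThreadRef) {
    //         aOnThreadRef.WithUnlockedConstReaderAndAtomicRW(
    //             [](const ThreadRegistration::UnlockedConstReaderAndAtomicRW&
    //                    aData) {
    //               if (aData.ProfilingFeatures() !=
    //                   ThreadProfilingFeatures::NotProfiled) {
    //                 // This thread is currently being profiled in some way.
    //               }
    //             });
    //       });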
+ class ConstRWOnThreadWithLock { + public: + [[nodiscard]] const LockedRWOnThread& DataCRef() const { + return mLockedRWOnThread; + } + [[nodiscard]] const LockedRWOnThread* operator->() const { + return &mLockedRWOnThread; + } + + private: + friend class OnThreadRef; + ConstRWOnThreadWithLock(const LockedRWOnThread& aLockedRWOnThread, + DataMutex& aDataMutex) + : mLockedRWOnThread(aLockedRWOnThread), mDataLock(aDataMutex) {} + + const LockedRWOnThread& mLockedRWOnThread; + DataLock mDataLock; + }; + + [[nodiscard]] ConstRWOnThreadWithLock ConstLockedRWOnThread() const { + return ConstRWOnThreadWithLock{mThreadRegistration->mData, + mThreadRegistration->mDataMutex}; + } + + template + auto WithConstLockedRWOnThread(F&& aF) const { + ConstRWOnThreadWithLock lockedData = ConstLockedRWOnThread(); + return std::forward(aF)(lockedData.DataCRef()); + } + + // LockedRWOnThread through RWOnThreadWithLock + + // Locking order: Profiler, ThreadRegistry, ThreadRegistration. + class RWOnThreadWithLock { + public: + [[nodiscard]] const LockedRWOnThread& DataCRef() const { + return mLockedRWOnThread; + } + [[nodiscard]] LockedRWOnThread& DataRef() { return mLockedRWOnThread; } + [[nodiscard]] const LockedRWOnThread* operator->() const { + return &mLockedRWOnThread; + } + [[nodiscard]] LockedRWOnThread* operator->() { + return &mLockedRWOnThread; + } + + private: + friend class OnThreadRef; + RWOnThreadWithLock(LockedRWOnThread& aLockedRWOnThread, + DataMutex& aDataMutex) + : mLockedRWOnThread(aLockedRWOnThread), mDataLock(aDataMutex) {} + + LockedRWOnThread& mLockedRWOnThread; + DataLock mDataLock; + }; + + [[nodiscard]] RWOnThreadWithLock GetLockedRWOnThread() { + return RWOnThreadWithLock{mThreadRegistration->mData, + mThreadRegistration->mDataMutex}; + } + + template + auto WithLockedRWOnThread(F&& aF) { + RWOnThreadWithLock lockedData = GetLockedRWOnThread(); + return std::forward(aF)(lockedData.DataRef()); + } + + // This is needed to allow OnThreadPtr::operator-> to return a temporary + // OnThreadRef object, for which `->` must work; Here it provides a pointer + // to itself, so that the next follow-up `->` will work as member accessor. + OnThreadRef* operator->() && { return this; } + + private: + // Only ThreadRegistration should construct an OnThreadRef. + friend class ThreadRegistration; + explicit OnThreadRef(ThreadRegistration& aThreadRegistration) + : mThreadRegistration(&aThreadRegistration) {} + + // Allow ThreadRegistry to read mThreadRegistration. + friend class ThreadRegistry; + + // Guaranted to be non-null by construction from a reference. + ThreadRegistration* mThreadRegistration; + }; + + // Pointer-like class pointing at the ThreadRegistration for the current + // thread, if one was registered. + class OnThreadPtr { + public: + [[nodiscard]] explicit operator bool() const { return mThreadRegistration; } + + // Note that this resolves to a temporary OnThreadRef object, which has all + // the allowed data accessors. + [[nodiscard]] OnThreadRef operator*() const { + MOZ_ASSERT(mThreadRegistration); + return OnThreadRef(*mThreadRegistration); + } + + // Note that this resolves to a temporary OnThreadRef object, which also + // overloads operator-> and has all the allowed data accessors. 
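    //
    // Illustrative use (hypothetical caller code):
    //
    //   if (auto onThreadPtr = ThreadRegistration::GetOnThreadPtr()) {
    //     ProfilingStack& stack =
    //         onThreadPtr->UnlockedConstReaderAndAtomicRWRef()
    //             .ProfilingStackRef();
    //     // ... use `stack` on this thread ...
    //   }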
+ [[nodiscard]] OnThreadRef operator->() const { + MOZ_ASSERT(mThreadRegistration); + return OnThreadRef(*mThreadRegistration); + } + + private: + friend class ThreadRegistration; + explicit OnThreadPtr(ThreadRegistration* aThreadRegistration) + : mThreadRegistration(aThreadRegistration) {} + + ThreadRegistration* mThreadRegistration; + }; + + [[nodiscard]] static OnThreadPtr GetOnThreadPtr() { + return OnThreadPtr{GetFromTLS()}; + } + + // Call `F(OnThreadRef)`. + template + static void WithOnThreadRef(F&& aF) { + const auto* tls = GetTLS(); + if (tls) { + ThreadRegistration* tr = tls->get(); + if (tr) { + std::forward(aF)(OnThreadRef{*tr}); + } + } + } + + // Call `F(OnThreadRef)`. + template + [[nodiscard]] static auto WithOnThreadRefOr(F&& aF, + FallbackReturn&& aFallbackReturn) + -> decltype(std::forward(aF)(std::declval())) { + const auto* tls = GetTLS(); + if (tls) { + ThreadRegistration* tr = tls->get(); + if (tr) { + return std::forward(aF)(OnThreadRef{*tr}); + } + } + return std::forward(aFallbackReturn); + } + + [[nodiscard]] static bool IsDataMutexLockedOnCurrentThread() { + if (const ThreadRegistration* tr = GetFromTLS(); tr) { + return tr->mDataMutex.IsLockedOnCurrentThread(); + } + return false; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + DataLock lock(mDataMutex); + return mData.SizeOfExcludingThis(aMallocSizeOf); + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + // aMallocSizeOf can only be used on head-allocated objects. Stack + // allocations and static objects are not counted. + return (mIsOnHeap ? aMallocSizeOf(this) : 0) + + SizeOfExcludingThis(aMallocSizeOf); + } + + private: + friend class ThreadRegistry; + + // This is what is embedded inside ThreadRegistration. + // References to sub-classes will be provided, to limit access as appropriate. + class EmbeddedData final : public LockedRWOnThread { + private: + // Only ThreadRegistration can construct (its embedded) `mData`. + friend class ThreadRegistration; + EmbeddedData(const char* aName, const void* aStackTop) + : LockedRWOnThread(aName, aStackTop) {} + }; + EmbeddedData mData; + + // Used when writing on self thread, and for any access from any thread. + // Locking order: Profiler, ThreadRegistry, ThreadRegistration. + mutable DataMutex mDataMutex; + + // In case of nested (non-RAII) registrations. Only accessed on thread. + int mOtherRegistrations = 0; + + // Set to true if allocated by `RegisterThread()`. Otherwise we assume that it + // is on the stack. + bool mIsOnHeap = false; + + // Only accessed by ThreadRegistry on this thread. + bool mIsRegistryLockedSharedOnThisThread = false; + + static MOZ_THREAD_LOCAL(ThreadRegistration*) tlsThreadRegistration; + + [[nodiscard]] static decltype(tlsThreadRegistration)* GetTLS() { + static const bool initialized = tlsThreadRegistration.init(); + return initialized ? &tlsThreadRegistration : nullptr; + } + + [[nodiscard]] static ThreadRegistration* GetFromTLS() { + const auto tls = GetTLS(); + return tls ? 
tls->get() : nullptr; + } +}; + +} // namespace mozilla::profiler + +#endif // ProfilerThreadRegistration_h diff --git a/tools/profiler/public/ProfilerThreadRegistrationData.h b/tools/profiler/public/ProfilerThreadRegistrationData.h new file mode 100644 index 0000000000..7c14290e4c --- /dev/null +++ b/tools/profiler/public/ProfilerThreadRegistrationData.h @@ -0,0 +1,537 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This header contains classes that hold data related to thread profiling: +// Data members are stored `protected` in `ThreadRegistrationData`. +// Non-virtual sub-classes of ProfilerThreadRegistrationData provide layers of +// public accessors to subsets of the data. Each level builds on the previous +// one and adds further access to more data, but always with the appropriate +// guards where necessary. +// These classes have protected constructors, so only some trusted classes +// `ThreadRegistration` and `ThreadRegistry` will be able to construct them, and +// then give limited access depending on who asks (the owning thread or another +// one), and how much data they actually need. +// +// The hierarchy is, from base to most derived: +// - ThreadRegistrationData +// - ThreadRegistrationUnlockedConstReader +// - ThreadRegistrationUnlockedConstReaderAndAtomicRW +// - ThreadRegistrationUnlockedRWForLockedProfiler +// - ThreadRegistrationUnlockedReaderAndAtomicRWOnThread +// - ThreadRegistrationLockedRWFromAnyThread +// - ThreadRegistrationLockedRWOnThread +// - ThreadRegistration::EmbeddedData (actual data member in ThreadRegistration) +// +// Tech detail: These classes need to be a single hierarchy so that +// `ThreadRegistration` can contain the most-derived class, and from there can +// publish references to base classes without relying on Undefined Behavior. +// (It's not allowed to have some object and give a reference to a sub-class, +// unless that object was *really* constructed as that sub-class at least, even +// if that sub-class only adds member functions!) +// And where appropriate, these references will come along with the required +// lock. + +#ifndef ProfilerThreadRegistrationData_h +#define ProfilerThreadRegistrationData_h + +#include "js/ProfilingFrameIterator.h" +#include "js/ProfilingStack.h" +#include "mozilla/Atomics.h" +#include "mozilla/BaseProfilerDetail.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/ProfilerThreadPlatformData.h" +#include "mozilla/ProfilerThreadRegistrationInfo.h" +#include "nsCOMPtr.h" +#include "nsIThread.h" + +class ProfiledThreadData; +class PSAutoLock; +struct JSContext; + +// Enum listing which profiling features are active for a single thread. +enum class ThreadProfilingFeatures : uint32_t { + // The thread is not being profiled at all (either the profiler is not + // running, or this thread is not examined during profiling.) + NotProfiled = 0u, + + // Single features, binary exclusive. May be `Combine()`d. + CPUUtilization = 1u << 0, + Sampling = 1u << 1, + Markers = 1u << 2, + + // All possible features. Usually used as a mask to see if any feature is + // active at a given time. + Any = CPUUtilization | Sampling | Markers +}; + +// Binary OR of one of more ThreadProfilingFeatures, to mix all arguments. 
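//
// Illustrative use (hypothetical):
//
//   constexpr ThreadProfilingFeatures kCpuAndMarkers =
//       Combine(ThreadProfilingFeatures::CPUUtilization,
//               ThreadProfilingFeatures::Markers);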
+template +[[nodiscard]] constexpr ThreadProfilingFeatures Combine( + ThreadProfilingFeatures a1, Ts... as) { + static_assert((true && ... && + std::is_same_v>, + ThreadProfilingFeatures>)); + return static_cast( + (static_cast>(a1) | ... | + static_cast>(as))); +} + +// Binary AND of one of more ThreadProfilingFeatures, to find features common to +// all arguments. +template +[[nodiscard]] constexpr ThreadProfilingFeatures Intersect( + ThreadProfilingFeatures a1, Ts... as) { + static_assert((true && ... && + std::is_same_v>, + ThreadProfilingFeatures>)); + return static_cast( + (static_cast>(a1) & ... & + static_cast>(as))); +} + +// Are there features in common between the two given sets? +// Mostly useful to test if any of a set of features is present in another set. +template +[[nodiscard]] constexpr bool DoFeaturesIntersect(ThreadProfilingFeatures a1, + ThreadProfilingFeatures a2) { + return Intersect(a1, a2) != ThreadProfilingFeatures::NotProfiled; +} + +namespace mozilla::profiler { + +// All data members related to thread profiling are stored here. +// See derived classes below, which give limited unlocked/locked read/write +// access in different situations, and will be available through +// ThreadRegistration and ThreadRegistry. +class ThreadRegistrationData { + public: + // No public accessors here. See derived classes for accessors, and + // Get.../With... functions for who can use these accessors. + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { + // Not including data that is not fully owned here. + return 0; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + static constexpr size_t MAX_JS_FRAMES = 1024; + using JsFrame = JS::ProfilingFrameIterator::Frame; + using JsFrameBuffer = JsFrame[MAX_JS_FRAMES]; + + // `protected` to allow derived classes to read all data members. + protected: + ThreadRegistrationData(const char* aName, const void* aStackTop); + +#ifdef DEBUG + // Destructor only used to check invariants. + ~ThreadRegistrationData() { + MOZ_ASSERT((mProfilingFeatures != ThreadProfilingFeatures::NotProfiled) == + !!mProfiledThreadData); + MOZ_ASSERT(!mProfiledThreadData, + "mProfiledThreadData pointer should have been reset before " + "~ThreadRegistrationData"); + } +#endif // DEBUG + + // Permanent thread information. + // Set at construction, read from anywhere, moved-from at destruction. + ThreadRegistrationInfo mInfo; + + // Contains profiler labels and JS frames. + // Deep-written on thread only, deep-read from thread and suspended thread. + ProfilingStack mProfilingStack; + + // In practice, only read from thread and suspended thread. + PlatformData mPlatformData; + + // Only read from thread and suspended thread. + const void* const mStackTop; + + // Written from thread, read from thread and suspended thread. + nsCOMPtr mThread; + + // If this is a JS thread, this is its JSContext, which is required for any + // JS sampling. + // Written from thread, read from thread and suspended thread. + JSContext* mJSContext = nullptr; + + // If mJSContext is not null AND the thread is being profiled, this points at + // the start of a JsFrameBuffer to be used for on-thread synchronous sampling. + JsFrame* mJsFrameBuffer = nullptr; + + // The profiler needs to start and stop JS sampling of JS threads at various + // times. However, the JS engine can only do the required actions on the + // JS thread itself ("on-thread"), not from another thread ("off-thread"). 
+ // Therefore, we have the following two-step process. + // + // - The profiler requests (on-thread or off-thread) that the JS sampling be + // started/stopped, by changing mJSSampling to the appropriate REQUESTED + // state. + // + // - The relevant JS thread polls (on-thread) for changes to mJSSampling. + // When it sees a REQUESTED state, it performs the appropriate actions to + // actually start/stop JS sampling, and changes mJSSampling out of the + // REQUESTED state. + // + // The state machine is as follows. + // + // INACTIVE --> ACTIVE_REQUESTED + // ^ ^ | + // | _/ | + // | _/ | + // | / | + // | v v + // INACTIVE_REQUESTED <-- ACTIVE + // + // The polling is done in the following two ways. + // + // - Via the interrupt callback mechanism; the JS thread must call + // profiler_js_interrupt_callback() from its own interrupt callback. + // This is how sampling must be started/stopped for threads where the + // request was made off-thread. + // + // - When {Start,Stop}JSSampling() is called on-thread, we can immediately + // follow it with a PollJSSampling() call to avoid the delay between the + // two steps. Likewise, setJSContext() calls PollJSSampling(). + // + // One non-obvious thing about all this: these JS sampling requests are made + // on all threads, even non-JS threads. mContext needs to also be set (via + // setJSContext(), which can only happen for JS threads) for any JS sampling + // to actually happen. + // + enum { + INACTIVE = 0, + ACTIVE_REQUESTED = 1, + ACTIVE = 2, + INACTIVE_REQUESTED = 3, + } mJSSampling = INACTIVE; + + uint32_t mJSFlags = 0; + + // Flags to conveniently track various JS instrumentations. + enum class JSInstrumentationFlags { + StackSampling = 0x1, + Allocations = 0x2, + }; + + [[nodiscard]] bool JSAllocationsEnabled() const { + return mJSFlags & uint32_t(JSInstrumentationFlags::Allocations); + } + + // The following members may be modified from another thread. + // They need to be atomic, because LockData() does not prevent reads from + // the owning thread. + + // mSleep tracks whether the thread is sleeping, and if so, whether it has + // been previously observed. This is used for an optimization: in some + // cases, when a thread is asleep, we duplicate the previous sample, which + // is cheaper than taking a new sample. + // + // mSleep is atomic because it is accessed from multiple threads. + // + // - It is written only by this thread, via setSleeping() and setAwake(). + // + // - It is read by SamplerThread::Run(). + // + // There are two cases where racing between threads can cause an issue. + // + // - If CanDuplicateLastSampleDueToSleep() returns false but that result is + // invalidated before being acted upon, we will take a full sample + // unnecessarily. This is additional work but won't cause any correctness + // issues. (In actual fact, this case is impossible. In order to go from + // CanDuplicateLastSampleDueToSleep() returning false to it returning true + // requires an intermediate call to it in order for mSleep to go from + // SLEEPING_NOT_OBSERVED to SLEEPING_OBSERVED.) + // + // - If CanDuplicateLastSampleDueToSleep() returns true but that result is + // invalidated before being acted upon -- i.e. the thread wakes up before + // DuplicateLastSample() is called -- we will duplicate the previous + // sample. This is inaccurate, but only slightly... we will effectively + // treat the thread as having slept a tiny bit longer than it really did. 
+ // + // This latter inaccuracy could be avoided by moving the + // CanDuplicateLastSampleDueToSleep() check within the thread-freezing code, + // e.g. the section where Tick() is called. But that would reduce the + // effectiveness of the optimization because more code would have to be run + // before we can tell that duplication is allowed. + // + static const int AWAKE = 0; + static const int SLEEPING_NOT_OBSERVED = 1; + static const int SLEEPING_OBSERVED = 2; + // Read&written from thread and suspended thread. + Atomic mSleep{AWAKE}; + Atomic mThreadCpuTimeInNsAtLastSleep{0}; + +#ifdef NIGHTLY_BUILD + // The first wake is the thread creation. + Atomic mWakeCount{1}; + mutable baseprofiler::detail::BaseProfilerMutex mRecordWakeCountMutex; + mutable uint64_t mAlreadyRecordedWakeCount = 0; + mutable uint64_t mAlreadyRecordedCpuTimeInMs = 0; +#endif + + // Is this thread currently being profiled, and with which features? + // Written from profiler, read from any thread. + // Invariant: `!!mProfilingFeatures == !!mProfiledThreadData` (set together.) + Atomic mProfilingFeatures{ + ThreadProfilingFeatures::NotProfiled}; + + // If the profiler is active and this thread is selected for profiling, this + // points at the relevant ProfiledThreadData. + // Fully controlled by the profiler. + // Invariant: `!!mProfilingFeatures == !!mProfiledThreadData` (set together). + ProfiledThreadData* mProfiledThreadData = nullptr; +}; + +// Accessing const data from any thread. +class ThreadRegistrationUnlockedConstReader : public ThreadRegistrationData { + public: + [[nodiscard]] const ThreadRegistrationInfo& Info() const { return mInfo; } + + [[nodiscard]] const PlatformData& PlatformDataCRef() const { + return mPlatformData; + } + + [[nodiscard]] const void* StackTop() const { return mStackTop; } + + protected: + ThreadRegistrationUnlockedConstReader(const char* aName, + const void* aStackTop) + : ThreadRegistrationData(aName, aStackTop) {} +}; + +// Accessing atomic data from any thread. +class ThreadRegistrationUnlockedConstReaderAndAtomicRW + : public ThreadRegistrationUnlockedConstReader { + public: + [[nodiscard]] const ProfilingStack& ProfilingStackCRef() const { + return mProfilingStack; + } + [[nodiscard]] ProfilingStack& ProfilingStackRef() { return mProfilingStack; } + + // Similar to `profiler_is_active()`, this atomic flag may become out-of-date. + // It should only be used as an indication to know whether this thread is + // probably being profiled (with some specific features), to avoid doing + // expensive operations otherwise. Edge cases: + // - This thread could get `NotProfiled`, but the profiler has just started, + // so some very early data may be missing. No real impact on profiling. + // - This thread could see profiled features, but the profiled has just + // stopped, so some some work will be done and then discarded when finally + // attempting to write to the buffer. No impact on profiling. + // - This thread could see profiled features, but the profiler will quickly + // stop and restart, so this thread will write information relevant to the + // previous profiling session. Very rare, and little impact on profiling. + [[nodiscard]] ThreadProfilingFeatures ProfilingFeatures() const { + return mProfilingFeatures; + } + + // Call this whenever the current thread sleeps. Calling it twice in a row + // without an intervening setAwake() call is an error. 
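  //
  // Illustrative pairing (hypothetical caller code; see also the public
  // profiler_thread_sleep()/profiler_thread_wake() functions declared in
  // ProfilerThreadSleep.h):
  //
  //   threadData.SetSleeping();
  //   // ... the thread blocks, e.g. waiting on a monitor ...
  //   threadData.SetAwake();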
+ void SetSleeping() { + MOZ_ASSERT(mSleep == AWAKE); + mSleep = SLEEPING_NOT_OBSERVED; + } + + // Call this whenever the current thread wakes. Calling it twice in a row + // without an intervening setSleeping() call is an error. + void SetAwake() { + MOZ_ASSERT(mSleep != AWAKE); + mSleep = AWAKE; +#ifdef NIGHTLY_BUILD + ++mWakeCount; +#endif + } + + // Returns the CPU time used by the thread since the previous call to this + // method or since the thread was started if this is the first call. + uint64_t GetNewCpuTimeInNs() { + uint64_t newCpuTimeNs; + if (!GetCpuTimeSinceThreadStartInNs(&newCpuTimeNs, PlatformDataCRef())) { + newCpuTimeNs = 0; + } + uint64_t before = mThreadCpuTimeInNsAtLastSleep; + uint64_t result = + MOZ_LIKELY(newCpuTimeNs > before) ? newCpuTimeNs - before : 0; + mThreadCpuTimeInNsAtLastSleep = newCpuTimeNs; + return result; + } + +#ifdef NIGHTLY_BUILD + void RecordWakeCount() const; +#endif + + // This is called on every profiler restart. Put things that should happen + // at that time here. + void ReinitializeOnResume() { + // This is needed to cause an initial sample to be taken from sleeping + // threads that had been observed prior to the profiler stopping and + // restarting. Otherwise sleeping threads would not have any samples to + // copy forward while sleeping. + (void)mSleep.compareExchange(SLEEPING_OBSERVED, SLEEPING_NOT_OBSERVED); + } + + // This returns true for the second and subsequent calls in each sleep + // cycle, so that the sampler can skip its full sampling and reuse the first + // asleep sample instead. + [[nodiscard]] bool CanDuplicateLastSampleDueToSleep() { + if (mSleep == AWAKE) { + return false; + } + if (mSleep.compareExchange(SLEEPING_NOT_OBSERVED, SLEEPING_OBSERVED)) { + return false; + } + return true; + } + + [[nodiscard]] bool IsSleeping() const { return mSleep != AWAKE; } + + protected: + ThreadRegistrationUnlockedConstReaderAndAtomicRW(const char* aName, + const void* aStackTop) + : ThreadRegistrationUnlockedConstReader(aName, aStackTop) {} +}; + +// Like above, with special PSAutoLock-guarded accessors. +class ThreadRegistrationUnlockedRWForLockedProfiler + : public ThreadRegistrationUnlockedConstReaderAndAtomicRW { + public: + // IMPORTANT! IMPORTANT! IMPORTANT! IMPORTANT! IMPORTANT! IMPORTANT! + // Only add functions that take a `const PSAutoLock&` proof-of-lock. + // (Because there is no other lock.) + + [[nodiscard]] const ProfiledThreadData* GetProfiledThreadData( + const PSAutoLock&) const { + return mProfiledThreadData; + } + + [[nodiscard]] ProfiledThreadData* GetProfiledThreadData(const PSAutoLock&) { + return mProfiledThreadData; + } + + protected: + ThreadRegistrationUnlockedRWForLockedProfiler(const char* aName, + const void* aStackTop) + : ThreadRegistrationUnlockedConstReaderAndAtomicRW(aName, aStackTop) {} +}; + +// Reading data, unlocked from the thread, or locked otherwise. +// This data MUST only be written from the thread with lock (i.e., in +// LockedRWOnThread through RWOnThreadWithLock.) +class ThreadRegistrationUnlockedReaderAndAtomicRWOnThread + : public ThreadRegistrationUnlockedRWForLockedProfiler { + public: + // IMPORTANT! IMPORTANT! IMPORTANT! IMPORTANT! IMPORTANT! IMPORTANT! + // Non-atomic members read here MUST be written from LockedRWOnThread (to + // guarantee that they are only modified on this thread.) 
+ + [[nodiscard]] JSContext* GetJSContext() const { return mJSContext; } + + protected: + ThreadRegistrationUnlockedReaderAndAtomicRWOnThread(const char* aName, + const void* aStackTop) + : ThreadRegistrationUnlockedRWForLockedProfiler(aName, aStackTop) {} +}; + +// Accessing locked data from the thread, or from any thread through the locked +// profiler: + +// Like above, and profiler can also read&write mutex-protected members. +class ThreadRegistrationLockedRWFromAnyThread + : public ThreadRegistrationUnlockedReaderAndAtomicRWOnThread { + public: + void SetProfilingFeaturesAndData(ThreadProfilingFeatures aProfilingFeatures, + ProfiledThreadData* aProfiledThreadData, + const PSAutoLock&); + void ClearProfilingFeaturesAndData(const PSAutoLock&); + + // Not null when JSContext is not null AND this thread is being profiled. + // Points at the start of JsFrameBuffer. + [[nodiscard]] JsFrame* GetJsFrameBuffer() const { return mJsFrameBuffer; } + + [[nodiscard]] const nsCOMPtr GetEventTarget() const { + return mThread; + } + + void ResetMainThread(nsIThread* aThread) { mThread = aThread; } + + // aDelay is the time the event that is currently running on the thread was + // queued before starting to run (if a PrioritizedEventQueue + // (i.e. MainThread), this will be 0 for any event at a lower priority + // than Input). + // aRunning is the time the event has been running. If no event is running + // these will both be TimeDuration() (i.e. 0). Both are out params, and are + // always set. Their initial value is discarded. + void GetRunningEventDelay(const TimeStamp& aNow, TimeDuration& aDelay, + TimeDuration& aRunning) { + if (mThread) { // can be null right at the start of a process + TimeStamp start; + mThread->GetRunningEventDelay(&aDelay, &start); + if (!start.IsNull()) { + // Note: the timestamp used here will be from when we started to + // suspend and sample the thread; which is also the timestamp + // associated with the sample. + aRunning = aNow - start; + return; + } + } + aDelay = TimeDuration(); + aRunning = TimeDuration(); + } + + // Request that this thread start JS sampling. JS sampling won't actually + // start until a subsequent PollJSSampling() call occurs *and* mContext has + // been set. + void StartJSSampling(uint32_t aJSFlags) { + // This function runs on-thread or off-thread. + + MOZ_RELEASE_ASSERT(mJSSampling == INACTIVE || + mJSSampling == INACTIVE_REQUESTED); + mJSSampling = ACTIVE_REQUESTED; + mJSFlags = aJSFlags; + } + + // Request that this thread stop JS sampling. JS sampling won't actually + // stop until a subsequent PollJSSampling() call occurs. + void StopJSSampling() { + // This function runs on-thread or off-thread. + + MOZ_RELEASE_ASSERT(mJSSampling == ACTIVE || + mJSSampling == ACTIVE_REQUESTED); + mJSSampling = INACTIVE_REQUESTED; + } + + protected: + ThreadRegistrationLockedRWFromAnyThread(const char* aName, + const void* aStackTop) + : ThreadRegistrationUnlockedReaderAndAtomicRWOnThread(aName, aStackTop) {} +}; + +// Accessing data, locked, from the thread. +// If any non-atomic data is readable from UnlockedReaderAndAtomicRWOnThread, +// it must be written from here, and not in base classes: Since this data is +// only written on the thread, it can be read from the same thread without +// lock; but writing must be locked so that other threads can safely read it, +// typically from LockedRWFromAnyThread. 
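//
// Illustrative two-step JS sampling flow (hypothetical sketch; the variable
// names are made up, and both sides are shown together for clarity):
//
//   // Step 1 - from the profiler, on any thread, with the data mutex held:
//   lockedFromAnyThread.StartJSSampling(aJSFlags);
//   // Step 2 - later, on the JS thread itself, typically from its interrupt
//   // callback or from SetJSContext():
//   lockedOnThread.PollJSSampling();  // JS sampling actually starts here,
//                                     // once a JSContext has been set.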
+class ThreadRegistrationLockedRWOnThread + : public ThreadRegistrationLockedRWFromAnyThread { + public: + void SetJSContext(JSContext* aJSContext); + void ClearJSContext(); + + // Poll to see if JS sampling should be started/stopped. + void PollJSSampling(); + + public: + ThreadRegistrationLockedRWOnThread(const char* aName, const void* aStackTop) + : ThreadRegistrationLockedRWFromAnyThread(aName, aStackTop) {} +}; + +} // namespace mozilla::profiler + +#endif // ProfilerThreadRegistrationData_h diff --git a/tools/profiler/public/ProfilerThreadRegistrationInfo.h b/tools/profiler/public/ProfilerThreadRegistrationInfo.h new file mode 100644 index 0000000000..e116c3059e --- /dev/null +++ b/tools/profiler/public/ProfilerThreadRegistrationInfo.h @@ -0,0 +1,64 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerThreadRegistrationInfo_h +#define ProfilerThreadRegistrationInfo_h + +#include "mozilla/BaseAndGeckoProfilerDetail.h" +#include "mozilla/ProfilerUtils.h" +#include "mozilla/TimeStamp.h" + +#include + +namespace mozilla::profiler { + +// This class contains immutable information about a thread which needs to be +// stored across restarts of the profiler and which can be useful even after the +// thread has stopped running. +class ThreadRegistrationInfo { + public: + // Construct on the thread. + explicit ThreadRegistrationInfo(const char* aName) : mName(aName) {} + + // Construct for a foreign thread (e.g., Java). + ThreadRegistrationInfo(const char* aName, ProfilerThreadId aThreadId, + bool aIsMainThread, const TimeStamp& aRegisterTime) + : mName(aName), + mRegisterTime(aRegisterTime), + mThreadId(aThreadId), + mIsMainThread(aIsMainThread) {} + + // Only allow move construction, for extraction when the thread ends. + ThreadRegistrationInfo(ThreadRegistrationInfo&&) = default; + + // Other copies/moves disallowed. 
+ ThreadRegistrationInfo(const ThreadRegistrationInfo&) = delete; + ThreadRegistrationInfo& operator=(const ThreadRegistrationInfo&) = delete; + ThreadRegistrationInfo& operator=(ThreadRegistrationInfo&&) = delete; + + [[nodiscard]] const char* Name() const { return mName.c_str(); } + [[nodiscard]] const TimeStamp& RegisterTime() const { return mRegisterTime; } + [[nodiscard]] ProfilerThreadId ThreadId() const { return mThreadId; } + [[nodiscard]] bool IsMainThread() const { return mIsMainThread; } + + private: + static TimeStamp ExistingRegisterTimeOrNow() { + TimeStamp registerTime = baseprofiler::detail::GetThreadRegistrationTime(); + if (!registerTime) { + registerTime = TimeStamp::Now(); + } + return registerTime; + } + + const std::string mName; + const TimeStamp mRegisterTime = ExistingRegisterTimeOrNow(); + const ProfilerThreadId mThreadId = profiler_current_thread_id(); + const bool mIsMainThread = profiler_is_main_thread(); +}; + +} // namespace mozilla::profiler + +#endif // ProfilerThreadRegistrationInfo_h diff --git a/tools/profiler/public/ProfilerThreadRegistry.h b/tools/profiler/public/ProfilerThreadRegistry.h new file mode 100644 index 0000000000..4d0fd3ef68 --- /dev/null +++ b/tools/profiler/public/ProfilerThreadRegistry.h @@ -0,0 +1,321 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerThreadRegistry_h +#define ProfilerThreadRegistry_h + +#include "mozilla/BaseProfilerDetail.h" +#include "mozilla/ProfilerThreadRegistration.h" +#include "mozilla/Vector.h" + +namespace mozilla::profiler { + +class ThreadRegistry { + private: + using RegistryMutex = baseprofiler::detail::BaseProfilerSharedMutex; + using RegistryLockExclusive = + baseprofiler::detail::BaseProfilerAutoLockExclusive; + using RegistryLockShared = baseprofiler::detail::BaseProfilerAutoLockShared; + + public: + // Aliases to data accessors (removing the ThreadRegistration prefix). + + using UnlockedConstReader = ThreadRegistrationUnlockedConstReader; + using UnlockedConstReaderAndAtomicRW = + ThreadRegistrationUnlockedConstReaderAndAtomicRW; + using UnlockedRWForLockedProfiler = + ThreadRegistrationUnlockedRWForLockedProfiler; + using UnlockedReaderAndAtomicRWOnThread = + ThreadRegistrationUnlockedReaderAndAtomicRWOnThread; + using LockedRWFromAnyThread = ThreadRegistrationLockedRWFromAnyThread; + using LockedRWOnThread = ThreadRegistrationLockedRWOnThread; + + // Off-thread access through the registry, providing the following data + // accessors: UnlockedConstReader, UnlockedConstReaderAndAtomicRW, + // UnlockedRWForLockedProfiler, and LockedRWFromAnyThread. + // (See ThreadRegistration class for ON-thread access.) + + // Reference-like class pointing at a ThreadRegistration. + // It should only exist while sRegistryMutex is locked. 
+ class OffThreadRef { + public: + // const UnlockedConstReader + + [[nodiscard]] const UnlockedConstReader& UnlockedConstReaderCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedConstReader(F&& aF) const { + return std::forward(aF)(UnlockedConstReaderCRef()); + } + + // const UnlockedConstReaderAndAtomicRW + + [[nodiscard]] const UnlockedConstReaderAndAtomicRW& + UnlockedConstReaderAndAtomicRWCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedConstReaderAndAtomicRW(F&& aF) const { + return std::forward(aF)(UnlockedConstReaderAndAtomicRWCRef()); + } + + // UnlockedConstReaderAndAtomicRW + + [[nodiscard]] UnlockedConstReaderAndAtomicRW& + UnlockedConstReaderAndAtomicRWRef() { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedConstReaderAndAtomicRW(F&& aF) { + return std::forward(aF)(UnlockedConstReaderAndAtomicRWRef()); + } + + // const UnlockedRWForLockedProfiler + + [[nodiscard]] const UnlockedRWForLockedProfiler& + UnlockedRWForLockedProfilerCRef() const { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedRWForLockedProfiler(F&& aF) const { + return std::forward(aF)(UnlockedRWForLockedProfilerCRef()); + } + + // UnlockedRWForLockedProfiler + + [[nodiscard]] UnlockedRWForLockedProfiler& + UnlockedRWForLockedProfilerRef() { + return mThreadRegistration->mData; + } + + template + auto WithUnlockedRWForLockedProfiler(F&& aF) { + return std::forward(aF)(UnlockedRWForLockedProfilerRef()); + } + + // const LockedRWFromAnyThread through ConstRWFromAnyThreadWithLock + + class ConstRWFromAnyThreadWithLock { + public: + [[nodiscard]] const LockedRWFromAnyThread& DataCRef() const { + return mLockedRWFromAnyThread; + } + [[nodiscard]] const LockedRWFromAnyThread* operator->() const { + return &mLockedRWFromAnyThread; + } + + ConstRWFromAnyThreadWithLock( + const LockedRWFromAnyThread& aLockedRWFromAnyThread, + ThreadRegistration::DataMutex& aDataMutex) + : mLockedRWFromAnyThread(aLockedRWFromAnyThread), + mDataLock(aDataMutex) {} + + private: + const LockedRWFromAnyThread& mLockedRWFromAnyThread; + ThreadRegistration::DataLock mDataLock; + }; + + [[nodiscard]] ConstRWFromAnyThreadWithLock ConstLockedRWFromAnyThread() + const { + return ConstRWFromAnyThreadWithLock{mThreadRegistration->mData, + mThreadRegistration->mDataMutex}; + } + + template + auto WithConstLockedRWFromAnyThread(F&& aF) const { + ConstRWFromAnyThreadWithLock lockedData = ConstLockedRWFromAnyThread(); + return std::forward(aF)(lockedData.DataCRef()); + } + + // LockedRWFromAnyThread through RWFromAnyThreadWithLock + + class RWFromAnyThreadWithLock { + public: + [[nodiscard]] const LockedRWFromAnyThread& DataCRef() const { + return mLockedRWFromAnyThread; + } + [[nodiscard]] LockedRWFromAnyThread& DataRef() { + return mLockedRWFromAnyThread; + } + [[nodiscard]] const LockedRWFromAnyThread* operator->() const { + return &mLockedRWFromAnyThread; + } + [[nodiscard]] LockedRWFromAnyThread* operator->() { + return &mLockedRWFromAnyThread; + } + + // In some situations, it may be useful to do some on-thread operations if + // we are indeed on this thread now. The lock is still held here; caller + // should not use this pointer longer than this RWFromAnyThreadWithLock. 
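      //
      // Illustrative use (hypothetical caller code):
      //
      //   auto lockedData = aOffThreadRef.GetLockedRWFromAnyThread();
      //   if (ThreadRegistry::LockedRWOnThread* onThread =
      //           lockedData.GetLockedRWOnThread()) {
      //     // We happen to be on the profiled thread itself, so on-thread-
      //     // only operations (e.g., PollJSSampling()) are permitted here,
      //     // for as long as `lockedData` is alive.
      //   }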
+ [[nodiscard]] LockedRWOnThread* GetLockedRWOnThread() { + if (mLockedRWFromAnyThread.Info().ThreadId() == + profiler_current_thread_id()) { + // mLockedRWFromAnyThread references a subclass of the + // ThreadRegistration's mData, so it's safe to downcast it to another + // hierarchy level of the object. + return &static_cast(mLockedRWFromAnyThread); + } + return nullptr; + } + + private: + friend class OffThreadRef; + RWFromAnyThreadWithLock(LockedRWFromAnyThread& aLockedRWFromAnyThread, + ThreadRegistration::DataMutex& aDataMutex) + : mLockedRWFromAnyThread(aLockedRWFromAnyThread), + mDataLock(aDataMutex) {} + + LockedRWFromAnyThread& mLockedRWFromAnyThread; + ThreadRegistration::DataLock mDataLock; + }; + + [[nodiscard]] RWFromAnyThreadWithLock GetLockedRWFromAnyThread() { + return RWFromAnyThreadWithLock{mThreadRegistration->mData, + mThreadRegistration->mDataMutex}; + } + + template + auto WithLockedRWFromAnyThread(F&& aF) { + RWFromAnyThreadWithLock lockedData = GetLockedRWFromAnyThread(); + return std::forward(aF)(lockedData.DataRef()); + } + + private: + // Only ThreadRegistry should construct an OnThreadRef. + friend class ThreadRegistry; + explicit OffThreadRef(ThreadRegistration& aThreadRegistration) + : mThreadRegistration(&aThreadRegistration) {} + + // If we have an ON-thread ref, it's safe to convert to an OFF-thread ref. + explicit OffThreadRef(ThreadRegistration::OnThreadRef aOnThreadRef) + : mThreadRegistration(aOnThreadRef.mThreadRegistration) {} + + [[nodiscard]] bool IsPointingAt( + ThreadRegistration& aThreadRegistration) const { + return mThreadRegistration == &aThreadRegistration; + } + + // Guaranted to be non-null by construction. + ThreadRegistration* mThreadRegistration; + }; + + // Lock the registry non-exclusively and allow iteration. E.g.: + // `for (OffThreadRef thread : LockedRegistry{}) { ... }` + // Do *not* export copies/references, as they could become dangling. + // Locking order: Profiler, ThreadRegistry, ThreadRegistration. + class LockedRegistry { + public: + LockedRegistry() + : mRegistryLock([]() -> RegistryMutex& { + MOZ_ASSERT(!IsRegistryMutexLockedOnCurrentThread(), + "Recursive locking detected"); + // In DEBUG builds, *before* we attempt to lock sRegistryMutex, we + // want to check that the ThreadRegistration mutex is *not* locked + // on this thread, to avoid inversion deadlocks. + MOZ_ASSERT(!ThreadRegistration::IsDataMutexLockedOnCurrentThread()); + return sRegistryMutex; + }()) { + ThreadRegistration::WithOnThreadRef( + [](ThreadRegistration::OnThreadRef aOnThreadRef) { + aOnThreadRef.mThreadRegistration + ->mIsRegistryLockedSharedOnThisThread = true; + }); + } + + ~LockedRegistry() { + ThreadRegistration::WithOnThreadRef( + [](ThreadRegistration::OnThreadRef aOnThreadRef) { + aOnThreadRef.mThreadRegistration + ->mIsRegistryLockedSharedOnThisThread = false; + }); + } + + [[nodiscard]] const OffThreadRef* begin() const { + return sRegistryContainer.begin(); + } + [[nodiscard]] OffThreadRef* begin() { return sRegistryContainer.begin(); } + [[nodiscard]] const OffThreadRef* end() const { + return sRegistryContainer.end(); + } + [[nodiscard]] OffThreadRef* end() { return sRegistryContainer.end(); } + + private: + RegistryLockShared mRegistryLock; + }; + + // Call `F(OffThreadRef)` for the given aThreadId. 
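  //
  // Illustrative use (hypothetical caller code):
  //
  //   ThreadRegistry::WithOffThreadRef(
  //       aThreadId, [](ThreadRegistry::OffThreadRef aOffThreadRef) {
  //         aOffThreadRef.WithUnlockedConstReaderAndAtomicRW(
  //             [](const ThreadRegistry::UnlockedConstReaderAndAtomicRW&
  //                    aData) { /* read const/atomic data here */ });
  //       });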
+ template + static void WithOffThreadRef(ProfilerThreadId aThreadId, F&& aF) { + for (OffThreadRef thread : LockedRegistry{}) { + if (thread.UnlockedConstReaderCRef().Info().ThreadId() == aThreadId) { + std::forward(aF)(thread); + break; + } + } + } + + template + [[nodiscard]] static auto WithOffThreadRefOr(ProfilerThreadId aThreadId, + F&& aF, + FallbackReturn&& aFallbackReturn) + -> decltype(std::forward(aF)(std::declval())) { + for (OffThreadRef thread : LockedRegistry{}) { + if (thread.UnlockedConstReaderCRef().Info().ThreadId() == aThreadId) { + return std::forward(aF)(thread); + } + } + return std::forward(aFallbackReturn); + } + + static size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) { + LockedRegistry lockedRegistry; + // "Ex" because we don't count static objects, but we count whatever they + // allocated on the heap. + size_t bytes = sRegistryContainer.sizeOfExcludingThis(aMallocSizeOf); + for (const OffThreadRef& offThreadRef : lockedRegistry) { + bytes += + offThreadRef.mThreadRegistration->SizeOfExcludingThis(aMallocSizeOf); + } + return bytes; + } + + static size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) { + return SizeOfExcludingThis(aMallocSizeOf); + } + + [[nodiscard]] static bool IsRegistryMutexLockedOnCurrentThread() { + return sRegistryMutex.IsLockedExclusiveOnCurrentThread() || + ThreadRegistration::WithOnThreadRefOr( + [](ThreadRegistration::OnThreadRef aOnThreadRef) { + return aOnThreadRef.mThreadRegistration + ->mIsRegistryLockedSharedOnThisThread; + }, + false); + } + + private: + using RegistryContainer = Vector; + + static RegistryContainer sRegistryContainer; + + // Mutex protecting the registry. + // Locking order: Profiler, ThreadRegistry, ThreadRegistration. + static RegistryMutex sRegistryMutex; + + // Only allow ThreadRegistration to (un)register itself. + friend class ThreadRegistration; + static void Register(ThreadRegistration::OnThreadRef aOnThreadRef); + static void Unregister(ThreadRegistration::OnThreadRef aOnThreadRef); +}; + +} // namespace mozilla::profiler + +#endif // ProfilerThreadRegistry_h diff --git a/tools/profiler/public/ProfilerThreadSleep.h b/tools/profiler/public/ProfilerThreadSleep.h new file mode 100644 index 0000000000..730176d39f --- /dev/null +++ b/tools/profiler/public/ProfilerThreadSleep.h @@ -0,0 +1,58 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// APIs that inform the profiler when a thread is effectively asleep so that we +// can avoid sampling it more than once. + +#ifndef ProfilerThreadSleep_h +#define ProfilerThreadSleep_h + +#ifndef MOZ_GECKO_PROFILER + +// This file can be #included unconditionally. However, everything within this +// file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the +// following macros and functions, which encapsulate the most common operations +// and thus avoid the need for many #ifdefs. + +# define AUTO_PROFILER_THREAD_SLEEP + +static inline void profiler_thread_sleep() {} + +static inline void profiler_thread_wake() {} + +#else // !MOZ_GECKO_PROFILER + +# include "mozilla/Attributes.h" +# include "mozilla/BaseProfilerRAIIMacro.h" + +// These functions tell the profiler that a thread went to sleep so that we can +// avoid sampling it more than once while it's sleeping. 
Calling +// profiler_thread_sleep() twice without an intervening profiler_thread_wake() +// is an error. All three functions operate the same whether the profiler is +// active or inactive. +void profiler_thread_sleep(); +void profiler_thread_wake(); + +// Mark a thread as asleep within a scope. +// (See also AUTO_PROFILER_THREAD_WAKE in ProfilerThreadState.h) +# define AUTO_PROFILER_THREAD_SLEEP \ + mozilla::AutoProfilerThreadSleep PROFILER_RAII + +namespace mozilla { + +// (See also AutoProfilerThreadWake in ProfilerThreadState.h) +class MOZ_RAII AutoProfilerThreadSleep { + public: + explicit AutoProfilerThreadSleep() { profiler_thread_sleep(); } + + ~AutoProfilerThreadSleep() { profiler_thread_wake(); } +}; + +} // namespace mozilla + +#endif // !MOZ_GECKO_PROFILER + +#endif // ProfilerThreadSleep_h diff --git a/tools/profiler/public/ProfilerThreadState.h b/tools/profiler/public/ProfilerThreadState.h new file mode 100644 index 0000000000..6ac48e41dd --- /dev/null +++ b/tools/profiler/public/ProfilerThreadState.h @@ -0,0 +1,128 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// This header contains functions that give information about the Profiler state +// with regards to the current thread. + +#ifndef ProfilerThreadState_h +#define ProfilerThreadState_h + +#include "mozilla/ProfilerState.h" +#include "mozilla/ProfilerThreadRegistration.h" +#include "mozilla/ProfilerThreadRegistry.h" +#include "mozilla/ProfilerThreadSleep.h" + +// During profiling, if the current thread is registered, return true +// (regardless of whether it is actively being profiled). +// (Same caveats and recommended usage as profiler_is_active().) +[[nodiscard]] inline bool profiler_is_active_and_thread_is_registered() { + return profiler_is_active() && + mozilla::profiler::ThreadRegistration::IsRegistered(); +} + +// Is the profiler active and unpaused, and is the current thread being +// profiled for any of the given features? (Same caveats and recommended usage +// as profiler_is_active().) +[[nodiscard]] inline bool profiler_thread_is_being_profiled( + ThreadProfilingFeatures aThreadProfilingFeatures) { + return profiler_is_active_and_unpaused() && + mozilla::profiler::ThreadRegistration::WithOnThreadRefOr( + [aThreadProfilingFeatures]( + mozilla::profiler::ThreadRegistration::OnThreadRef aTR) { + return DoFeaturesIntersect( + aTR.UnlockedConstReaderAndAtomicRWCRef().ProfilingFeatures(), + aThreadProfilingFeatures); + }, + false); +} + +// Is the profiler active and unpaused, and is the given thread being profiled? +// (Same caveats and recommended usage as profiler_is_active().) +// Safe to use with the current thread id, or unspecified ProfilerThreadId (same +// as current thread id). +[[nodiscard]] inline bool profiler_thread_is_being_profiled( + const ProfilerThreadId& aThreadId, + ThreadProfilingFeatures aThreadProfilingFeatures) { + if (!profiler_is_active_and_unpaused()) { + return false; + } + + if (!aThreadId.IsSpecified() || aThreadId == profiler_current_thread_id()) { + // For the current thread id, use the ThreadRegistration directly, it is + // more efficient. 
+ return mozilla::profiler::ThreadRegistration::WithOnThreadRefOr( + [aThreadProfilingFeatures]( + mozilla::profiler::ThreadRegistration::OnThreadRef aTR) { + return DoFeaturesIntersect( + aTR.UnlockedConstReaderAndAtomicRWCRef().ProfilingFeatures(), + aThreadProfilingFeatures); + }, + false); + } + + // For other threads, go through the ThreadRegistry. + return mozilla::profiler::ThreadRegistry::WithOffThreadRefOr( + aThreadId, + [aThreadProfilingFeatures]( + mozilla::profiler::ThreadRegistry::OffThreadRef aTR) { + return DoFeaturesIntersect( + aTR.UnlockedConstReaderAndAtomicRWCRef().ProfilingFeatures(), + aThreadProfilingFeatures); + }, + false); +} + +// Is the current thread registered and sleeping? +[[nodiscard]] inline bool profiler_thread_is_sleeping() { + return profiler_is_active() && + mozilla::profiler::ThreadRegistration::WithOnThreadRefOr( + [](mozilla::profiler::ThreadRegistration::OnThreadRef aTR) { + return aTR.UnlockedConstReaderAndAtomicRWCRef().IsSleeping(); + }, + false); +} + +#ifndef MOZ_GECKO_PROFILER + +# define AUTO_PROFILER_THREAD_WAKE + +#else // !MOZ_GECKO_PROFILER + +// Mark a thread as awake within a scope. +// (See also AUTO_PROFILER_THREAD_SLEEP in mozilla/ProfilerThreadSleep.h) +# define AUTO_PROFILER_THREAD_WAKE \ + mozilla::AutoProfilerThreadWake PROFILER_RAII + +namespace mozilla { + +// Temporarily wake up the profiling of a thread while servicing events such as +// Asynchronous Procedure Calls (APCs). +// (See also AutoProfilerThreadSleep in ProfilerThreadSleep.h) +class MOZ_RAII AutoProfilerThreadWake { + public: + explicit AutoProfilerThreadWake() + : mIssuedWake(profiler_thread_is_sleeping()) { + if (mIssuedWake) { + profiler_thread_wake(); + } + } + + ~AutoProfilerThreadWake() { + if (mIssuedWake) { + MOZ_ASSERT(!profiler_thread_is_sleeping()); + profiler_thread_sleep(); + } + } + + private: + bool mIssuedWake; +}; + +} // namespace mozilla + +#endif // !MOZ_GECKO_PROFILER + +#endif // ProfilerThreadState_h diff --git a/tools/profiler/public/ProfilerUtils.h b/tools/profiler/public/ProfilerUtils.h new file mode 100644 index 0000000000..3969761e18 --- /dev/null +++ b/tools/profiler/public/ProfilerUtils.h @@ -0,0 +1,32 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ProfilerUtils_h +#define ProfilerUtils_h + +// This header contains most process- and thread-related functions. +// It is safe to include unconditionally. + +#include "mozilla/BaseProfilerUtils.h" + +using ProfilerProcessId = mozilla::baseprofiler::BaseProfilerProcessId; +using ProfilerThreadId = mozilla::baseprofiler::BaseProfilerThreadId; + +// Get the current process's ID. +[[nodiscard]] ProfilerProcessId profiler_current_process_id(); + +// Get the current thread's ID. +[[nodiscard]] ProfilerThreadId profiler_current_thread_id(); + +// Must be called at least once from the main thread, before any other main- +// thread id function. 
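//
// Illustrative early-startup use (hypothetical sketch):
//
//   profiler_init_main_thread_id();         // once, on the main thread
//   MOZ_ASSERT(profiler_is_main_thread());  // later main-thread checks OK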
+void profiler_init_main_thread_id(); + +[[nodiscard]] ProfilerThreadId profiler_main_thread_id(); + +[[nodiscard]] bool profiler_is_main_thread(); + +#endif // ProfilerUtils_h diff --git a/tools/profiler/public/shared-libraries.h b/tools/profiler/public/shared-libraries.h new file mode 100644 index 0000000000..dfd3599e71 --- /dev/null +++ b/tools/profiler/public/shared-libraries.h @@ -0,0 +1,213 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef SHARED_LIBRARIES_H_ +#define SHARED_LIBRARIES_H_ + +#ifndef MOZ_GECKO_PROFILER +# error This header does not have a useful implementation on your platform! +#endif + +#include "nsNativeCharsetUtils.h" +#include "nsString.h" +#include + +#include +#include +#include +#include +#include + +namespace IPC { +class MessageReader; +class MessageWriter; +template +struct ParamTraits; +} // namespace IPC + +class SharedLibrary { + public: + SharedLibrary(uintptr_t aStart, uintptr_t aEnd, uintptr_t aOffset, + const nsCString& aBreakpadId, const nsCString& aCodeId, + const nsString& aModuleName, const nsString& aModulePath, + const nsString& aDebugName, const nsString& aDebugPath, + const nsCString& aVersion, const char* aArch) + : mStart(aStart), + mEnd(aEnd), + mOffset(aOffset), + mBreakpadId(aBreakpadId), + mCodeId(aCodeId), + mModuleName(aModuleName), + mModulePath(aModulePath), + mDebugName(aDebugName), + mDebugPath(aDebugPath), + mVersion(aVersion), + mArch(aArch) {} + + bool operator==(const SharedLibrary& other) const { + return (mStart == other.mStart) && (mEnd == other.mEnd) && + (mOffset == other.mOffset) && (mModuleName == other.mModuleName) && + (mModulePath == other.mModulePath) && + (mDebugName == other.mDebugName) && + (mDebugPath == other.mDebugPath) && + (mBreakpadId == other.mBreakpadId) && (mCodeId == other.mCodeId) && + (mVersion == other.mVersion) && (mArch == other.mArch); + } + + uintptr_t GetStart() const { return mStart; } + uintptr_t GetEnd() const { return mEnd; } + uintptr_t GetOffset() const { return mOffset; } + const nsCString& GetBreakpadId() const { return mBreakpadId; } + const nsCString& GetCodeId() const { return mCodeId; } + const nsString& GetModuleName() const { return mModuleName; } + const nsString& GetModulePath() const { return mModulePath; } + const std::string GetNativeDebugPath() const { + nsAutoCString debugPathStr; + + NS_CopyUnicodeToNative(mDebugPath, debugPathStr); + + return debugPathStr.get(); + } + const nsString& GetDebugName() const { return mDebugName; } + const nsString& GetDebugPath() const { return mDebugPath; } + const nsCString& GetVersion() const { return mVersion; } + const std::string& GetArch() const { return mArch; } + size_t SizeOf() const { + return sizeof *this + mBreakpadId.Length() + mCodeId.Length() + + mModuleName.Length() * 2 + mModulePath.Length() * 2 + + mDebugName.Length() * 2 + mDebugPath.Length() * 2 + + mVersion.Length() + mArch.size(); + } + + SharedLibrary() : mStart{0}, mEnd{0}, mOffset{0} {} + + private: + uintptr_t mStart; + uintptr_t mEnd; + uintptr_t mOffset; + nsCString mBreakpadId; + // A string carrying an identifier for a binary. + // + // All platforms have different formats: + // - Windows: The code ID for a Windows PE file. + // It's the PE timestamp and PE image size. 
+ // - macOS: The code ID for a macOS / iOS binary (mach-O). + // It's the mach-O UUID without dashes and without the trailing 0 for the + // breakpad ID. + // - Linux/Android: The code ID for a Linux ELF file. + // It's the complete build ID, as hex string. + nsCString mCodeId; + nsString mModuleName; + nsString mModulePath; + nsString mDebugName; + nsString mDebugPath; + nsCString mVersion; + std::string mArch; + + friend struct IPC::ParamTraits; +}; + +static bool CompareAddresses(const SharedLibrary& first, + const SharedLibrary& second) { + return first.GetStart() < second.GetStart(); +} + +class SharedLibraryInfo { + public: + static SharedLibraryInfo GetInfoForSelf(); +#ifdef XP_WIN + static SharedLibraryInfo GetInfoFromPath(const wchar_t* aPath); +#endif + + static void Initialize(); + + void AddSharedLibrary(SharedLibrary entry) { mEntries.push_back(entry); } + + void AddAllSharedLibraries(const SharedLibraryInfo& sharedLibraryInfo) { + mEntries.insert(mEntries.end(), sharedLibraryInfo.mEntries.begin(), + sharedLibraryInfo.mEntries.end()); + } + + const SharedLibrary& GetEntry(size_t i) const { return mEntries[i]; } + + SharedLibrary& GetMutableEntry(size_t i) { return mEntries[i]; } + + // Removes items in the range [first, last) + // i.e. element at the "last" index is not removed + void RemoveEntries(size_t first, size_t last) { + mEntries.erase(mEntries.begin() + first, mEntries.begin() + last); + } + + bool Contains(const SharedLibrary& searchItem) const { + return (mEntries.end() != + std::find(mEntries.begin(), mEntries.end(), searchItem)); + } + + size_t GetSize() const { return mEntries.size(); } + + void SortByAddress() { + std::sort(mEntries.begin(), mEntries.end(), CompareAddresses); + } + + // Remove duplicate entries from the vector. + // + // We purposefully don't use the operator== implementation of SharedLibrary + // because it compares all the fields including mStart, mEnd and mOffset which + // are not the same across different processes. + void DeduplicateEntries() { + static auto cmpSort = [](const SharedLibrary& a, const SharedLibrary& b) { + return std::tie(a.GetModuleName(), a.GetBreakpadId()) < + std::tie(b.GetModuleName(), b.GetBreakpadId()); + }; + static auto cmpEqual = [](const SharedLibrary& a, const SharedLibrary& b) { + return std::tie(a.GetModuleName(), a.GetBreakpadId()) == + std::tie(b.GetModuleName(), b.GetBreakpadId()); + }; + // std::unique requires the vector to be sorted first. It can only remove + // consecutive duplicate elements. + std::sort(mEntries.begin(), mEntries.end(), cmpSort); + // Remove the duplicates since it's sorted now. + mEntries.erase(std::unique(mEntries.begin(), mEntries.end(), cmpEqual), + mEntries.end()); + } + + void Clear() { mEntries.clear(); } + + size_t SizeOf() const { + size_t size = 0; + + for (const auto& item : mEntries) { + size += item.SizeOf(); + } + + return size; + } + + private: + std::vector mEntries; + + friend struct IPC::ParamTraits; +}; + +namespace IPC { +template <> +struct ParamTraits { + typedef SharedLibrary paramType; + + static void Write(MessageWriter* aWriter, const paramType& aParam); + static bool Read(MessageReader* aReader, paramType* aResult); +}; + +template <> +struct ParamTraits { + typedef SharedLibraryInfo paramType; + + static void Write(MessageWriter* aWriter, const paramType& aParam); + static bool Read(MessageReader* aReader, paramType* aResult); +}; +} // namespace IPC + +#endif -- cgit v1.2.3
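SharedLibraryInfo::DeduplicateEntries() above relies on the standard
sort-then-unique idiom, since std::unique only removes *consecutive*
duplicates. A minimal self-contained sketch of that idiom on a plain
std::vector (illustrative only; a simple string key stands in for the
module-name/breakpad-id pair compared by the real code):

    #include <algorithm>
    #include <string>
    #include <vector>

    void Deduplicate(std::vector<std::string>& aEntries) {
      // std::unique only removes consecutive duplicates, so sort first.
      std::sort(aEntries.begin(), aEntries.end());
      // Then erase the leftover tail that std::unique shifts to the end.
      aEntries.erase(std::unique(aEntries.begin(), aEntries.end()),
                     aEntries.end());
    }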