author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /xpcom/threads
parent    Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'xpcom/threads')
-rw-r--r-- xpcom/threads/AbstractThread.cpp 359
-rw-r--r-- xpcom/threads/AbstractThread.h 129
-rw-r--r-- xpcom/threads/BlockingResourceBase.cpp 547
-rw-r--r-- xpcom/threads/BlockingResourceBase.h 339
-rw-r--r-- xpcom/threads/CPUUsageWatcher.cpp 252
-rw-r--r-- xpcom/threads/CPUUsageWatcher.h 100
-rw-r--r-- xpcom/threads/CondVar.h 139
-rw-r--r-- xpcom/threads/DataMutex.h 130
-rw-r--r-- xpcom/threads/DeadlockDetector.h 359
-rw-r--r-- xpcom/threads/DelayedRunnable.cpp 113
-rw-r--r-- xpcom/threads/DelayedRunnable.h 53
-rw-r--r-- xpcom/threads/EventQueue.cpp 131
-rw-r--r-- xpcom/threads/EventQueue.h 136
-rw-r--r-- xpcom/threads/EventTargetCapability.h 95
-rw-r--r-- xpcom/threads/IdlePeriodState.cpp 255
-rw-r--r-- xpcom/threads/IdlePeriodState.h 201
-rw-r--r-- xpcom/threads/IdleTaskRunner.cpp 280
-rw-r--r-- xpcom/threads/IdleTaskRunner.h 122
-rw-r--r-- xpcom/threads/InputTaskManager.cpp 156
-rw-r--r-- xpcom/threads/InputTaskManager.h 141
-rw-r--r-- xpcom/threads/LazyIdleThread.cpp 126
-rw-r--r-- xpcom/threads/LazyIdleThread.h 93
-rw-r--r-- xpcom/threads/LeakRefPtr.h 48
-rw-r--r-- xpcom/threads/MainThreadIdlePeriod.cpp 78
-rw-r--r-- xpcom/threads/MainThreadIdlePeriod.h 31
-rw-r--r-- xpcom/threads/MainThreadUtils.h 60
-rw-r--r-- xpcom/threads/Monitor.h 316
-rw-r--r-- xpcom/threads/MozPromise.h 1763
-rw-r--r-- xpcom/threads/MozPromiseInlines.h 48
-rw-r--r-- xpcom/threads/Mutex.h 452
-rw-r--r-- xpcom/threads/PerformanceCounter.cpp 73
-rw-r--r-- xpcom/threads/PerformanceCounter.h 139
-rw-r--r-- xpcom/threads/Queue.h 265
-rw-r--r-- xpcom/threads/RWLock.cpp 28
-rw-r--r-- xpcom/threads/RWLock.h 243
-rw-r--r-- xpcom/threads/RecursiveMutex.cpp 85
-rw-r--r-- xpcom/threads/RecursiveMutex.h 120
-rw-r--r-- xpcom/threads/ReentrantMonitor.h 251
-rw-r--r-- xpcom/threads/SchedulerGroup.cpp 125
-rw-r--r-- xpcom/threads/SchedulerGroup.h 87
-rw-r--r-- xpcom/threads/SharedThreadPool.cpp 221
-rw-r--r-- xpcom/threads/SharedThreadPool.h 130
-rw-r--r-- xpcom/threads/SpinEventLoopUntil.h 191
-rw-r--r-- xpcom/threads/StateMirroring.h 393
-rw-r--r-- xpcom/threads/StateWatching.h 302
-rw-r--r-- xpcom/threads/SyncRunnable.h 157
-rw-r--r-- xpcom/threads/SynchronizedEventQueue.cpp 26
-rw-r--r-- xpcom/threads/SynchronizedEventQueue.h 131
-rw-r--r-- xpcom/threads/TaskCategory.h 47
-rw-r--r-- xpcom/threads/TaskController.cpp 1072
-rw-r--r-- xpcom/threads/TaskController.h 445
-rw-r--r-- xpcom/threads/TaskDispatcher.h 304
-rw-r--r-- xpcom/threads/TaskQueue.cpp 347
-rw-r--r-- xpcom/threads/TaskQueue.h 281
-rw-r--r-- xpcom/threads/ThreadBound.h 143
-rw-r--r-- xpcom/threads/ThreadDelay.cpp 38
-rw-r--r-- xpcom/threads/ThreadDelay.h 16
-rw-r--r-- xpcom/threads/ThreadEventQueue.cpp 324
-rw-r--r-- xpcom/threads/ThreadEventQueue.h 95
-rw-r--r-- xpcom/threads/ThreadEventTarget.cpp 136
-rw-r--r-- xpcom/threads/ThreadEventTarget.h 63
-rw-r--r-- xpcom/threads/ThreadLocalVariables.cpp 16
-rw-r--r-- xpcom/threads/ThrottledEventQueue.cpp 459
-rw-r--r-- xpcom/threads/ThrottledEventQueue.h 118
-rw-r--r-- xpcom/threads/TimerThread.cpp 1512
-rw-r--r-- xpcom/threads/TimerThread.h 243
-rw-r--r-- xpcom/threads/VsyncTaskManager.cpp 22
-rw-r--r-- xpcom/threads/VsyncTaskManager.h 26
-rw-r--r-- xpcom/threads/WinHandleWatcher.cpp 303
-rw-r--r-- xpcom/threads/WinHandleWatcher.h 117
-rw-r--r-- xpcom/threads/components.conf 29
-rw-r--r-- xpcom/threads/moz.build 148
-rw-r--r-- xpcom/threads/nsEnvironment.cpp 136
-rw-r--r-- xpcom/threads/nsEnvironment.h 34
-rw-r--r-- xpcom/threads/nsICancelableRunnable.h 40
-rw-r--r-- xpcom/threads/nsIDirectTaskDispatcher.idl 57
-rw-r--r-- xpcom/threads/nsIDiscardableRunnable.h 41
-rw-r--r-- xpcom/threads/nsIEnvironment.idl 54
-rw-r--r-- xpcom/threads/nsIEventTarget.idl 227
-rw-r--r-- xpcom/threads/nsIIdlePeriod.idl 32
-rw-r--r-- xpcom/threads/nsIIdleRunnable.h 48
-rw-r--r-- xpcom/threads/nsINamed.idl 24
-rw-r--r-- xpcom/threads/nsIProcess.idl 112
-rw-r--r-- xpcom/threads/nsIRunnable.idl 45
-rw-r--r-- xpcom/threads/nsISerialEventTarget.idl 27
-rw-r--r-- xpcom/threads/nsISupportsPriority.idl 45
-rw-r--r-- xpcom/threads/nsITargetShutdownTask.h 37
-rw-r--r-- xpcom/threads/nsIThread.idl 222
-rw-r--r-- xpcom/threads/nsIThreadInternal.idl 110
-rw-r--r-- xpcom/threads/nsIThreadManager.idl 173
-rw-r--r-- xpcom/threads/nsIThreadPool.idl 115
-rw-r--r-- xpcom/threads/nsIThreadShutdown.idl 57
-rw-r--r-- xpcom/threads/nsITimer.idl 376
-rw-r--r-- xpcom/threads/nsMemoryPressure.cpp 104
-rw-r--r-- xpcom/threads/nsMemoryPressure.h 77
-rw-r--r-- xpcom/threads/nsProcess.h 82
-rw-r--r-- xpcom/threads/nsProcessCommon.cpp 600
-rw-r--r-- xpcom/threads/nsProxyRelease.cpp 30
-rw-r--r-- xpcom/threads/nsProxyRelease.h 390
-rw-r--r-- xpcom/threads/nsThread.cpp 1609
-rw-r--r-- xpcom/threads/nsThread.h 400
-rw-r--r-- xpcom/threads/nsThreadManager.cpp 798
-rw-r--r-- xpcom/threads/nsThreadManager.h 117
-rw-r--r-- xpcom/threads/nsThreadPool.cpp 611
-rw-r--r-- xpcom/threads/nsThreadPool.h 68
-rw-r--r-- xpcom/threads/nsThreadSyncDispatch.h 65
-rw-r--r-- xpcom/threads/nsThreadUtils.cpp 768
-rw-r--r-- xpcom/threads/nsThreadUtils.h 1925
-rw-r--r-- xpcom/threads/nsTimerImpl.cpp 820
-rw-r--r-- xpcom/threads/nsTimerImpl.h 231
110 files changed, 27030 insertions, 0 deletions
diff --git a/xpcom/threads/AbstractThread.cpp b/xpcom/threads/AbstractThread.cpp
new file mode 100644
index 0000000000..61289a6789
--- /dev/null
+++ b/xpcom/threads/AbstractThread.cpp
@@ -0,0 +1,359 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/AbstractThread.h"
+
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/DelayedRunnable.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MozPromise.h" // We initialize the MozPromise logging in this file.
+#include "mozilla/ProfilerRunnable.h"
+#include "mozilla/StateWatching.h" // We initialize the StateWatching logging in this file.
+#include "mozilla/StaticPtr.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/Unused.h"
+#include "nsContentUtils.h"
+#include "nsIDirectTaskDispatcher.h"
+#include "nsIThreadInternal.h"
+#include "nsServiceManagerUtils.h"
+#include "nsThreadManager.h"
+#include "nsThreadUtils.h"
+#include <memory>
+
+namespace mozilla {
+
+LazyLogModule gMozPromiseLog("MozPromise");
+LazyLogModule gStateWatchingLog("StateWatching");
+
+StaticRefPtr<AbstractThread> sMainThread;
+MOZ_THREAD_LOCAL(AbstractThread*) AbstractThread::sCurrentThreadTLS;
+
+class XPCOMThreadWrapper final : public AbstractThread,
+ public nsIThreadObserver,
+ public nsIDirectTaskDispatcher {
+ public:
+ XPCOMThreadWrapper(nsIThreadInternal* aThread, bool aRequireTailDispatch,
+ bool aOnThread)
+ : AbstractThread(aRequireTailDispatch),
+ mThread(aThread),
+ mDirectTaskDispatcher(do_QueryInterface(aThread)),
+ mOnThread(aOnThread) {
+ MOZ_DIAGNOSTIC_ASSERT(mThread && mDirectTaskDispatcher);
+ MOZ_DIAGNOSTIC_ASSERT(!aOnThread || IsCurrentThreadIn());
+ if (aOnThread) {
+ MOZ_ASSERT(!sCurrentThreadTLS.get(),
+ "There can only be a single XPCOMThreadWrapper available on a "
+ "thread");
+ // Set the default current thread so that GetCurrent() never returns
+ // nullptr.
+ sCurrentThreadTLS.set(this);
+ }
+ }
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ nsresult Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ DispatchReason aReason = NormalDispatch) override {
+ nsCOMPtr<nsIRunnable> r = aRunnable;
+ AbstractThread* currentThread;
+ if (aReason != TailDispatch && (currentThread = GetCurrent()) &&
+ RequiresTailDispatch(currentThread) &&
+ currentThread->IsTailDispatcherAvailable()) {
+ return currentThread->TailDispatcher().AddTask(this, r.forget());
+ }
+
+ // At a certain point during shutdown, we stop processing events from the
+ // main thread event queue (this happens long after all _other_ XPCOM
+ // threads have been shut down). However, various bits of subsequent
+ // teardown logic (the media shutdown blocker and the final shutdown cycle
+ // collection) can trigger state watching and state mirroring notifications
+ // that result in dispatch to the main thread. This causes shutdown leaks,
+ // because the |Runner| wrapper below creates a guaranteed cycle
+ // (Thread->EventQueue->Runnable->Thread) until the event is processed. So
+ // if we put the event into a queue that will never be processed, we'll wind
+ // up with a leak.
+ //
+ // We opt to just release the runnable in that case. Ordinarily, this
+ // approach could cause problems for runnables that are only safe to be
+ // released on the target thread (and not the dispatching thread). This is
+ // why XPCOM thread dispatch explicitly leaks the runnable when dispatch
+ // fails, rather than releasing it. But given that this condition only
+ // applies very late in shutdown when only one thread remains operational,
+ // that concern is unlikely to apply.
+ if (gXPCOMMainThreadEventsAreDoomed) {
+ return NS_ERROR_FAILURE;
+ }
+
+ RefPtr<nsIRunnable> runner = new Runner(this, r.forget());
+ return mThread->Dispatch(runner.forget(), NS_DISPATCH_NORMAL);
+ }
+
+ // Prevent a GCC warning about the other overload of Dispatch being hidden.
+ using AbstractThread::Dispatch;
+
+ NS_IMETHOD RegisterShutdownTask(nsITargetShutdownTask* aTask) override {
+ return mThread->RegisterShutdownTask(aTask);
+ }
+
+ NS_IMETHOD UnregisterShutdownTask(nsITargetShutdownTask* aTask) override {
+ return mThread->UnregisterShutdownTask(aTask);
+ }
+
+ bool IsCurrentThreadIn() const override {
+ return mThread->IsOnCurrentThread();
+ }
+
+ TaskDispatcher& TailDispatcher() override {
+ MOZ_ASSERT(IsCurrentThreadIn());
+ MOZ_ASSERT(IsTailDispatcherAvailable());
+ if (!mTailDispatcher) {
+ mTailDispatcher =
+ std::make_unique<AutoTaskDispatcher>(mDirectTaskDispatcher,
+ /* aIsTailDispatcher = */ true);
+ mThread->AddObserver(this);
+ }
+
+ return *mTailDispatcher;
+ }
+
+ bool IsTailDispatcherAvailable() override {
+ // Our tail dispatching implementation relies on nsIThreadObserver
+ // callbacks. If we're not doing event processing, it won't work.
+ bool inEventLoop =
+ static_cast<nsThread*>(mThread.get())->RecursionDepth() > 0;
+ return inEventLoop;
+ }
+
+ bool MightHaveTailTasks() override { return !!mTailDispatcher; }
+
+ nsIEventTarget* AsEventTarget() override { return mThread; }
+
+ //-----------------------------------------------------------------------------
+ // nsIThreadObserver
+ //-----------------------------------------------------------------------------
+ NS_IMETHOD OnDispatchedEvent() override { return NS_OK; }
+
+ NS_IMETHOD AfterProcessNextEvent(nsIThreadInternal* thread,
+ bool eventWasProcessed) override {
+ // This is the primary case.
+ MaybeFireTailDispatcher();
+ return NS_OK;
+ }
+
+ NS_IMETHOD OnProcessNextEvent(nsIThreadInternal* thread,
+ bool mayWait) override {
+    // In general, the tail dispatcher is handled at the end of the current
+    // event in AfterProcessNextEvent() above. However, if we start spinning a
+    // nested event loop, it's generally better to fire the tail dispatcher
+    // before the first nested event, rather than after it. This check handles
+    // that case.
+ MaybeFireTailDispatcher();
+ return NS_OK;
+ }
+
+ //-----------------------------------------------------------------------------
+ // nsIDirectTaskDispatcher
+ //-----------------------------------------------------------------------------
+ // Forward calls to nsIDirectTaskDispatcher to the underlying nsThread object.
+ // We can't use the generated NS_FORWARD_NSIDIRECTTASKDISPATCHER macro
+  // because the already_AddRefed type must be moved.
+ NS_IMETHOD DispatchDirectTask(already_AddRefed<nsIRunnable> aEvent) override {
+ return mDirectTaskDispatcher->DispatchDirectTask(std::move(aEvent));
+ }
+ NS_IMETHOD DrainDirectTasks() override {
+ return mDirectTaskDispatcher->DrainDirectTasks();
+ }
+ NS_IMETHOD HaveDirectTasks(bool* aResult) override {
+ return mDirectTaskDispatcher->HaveDirectTasks(aResult);
+ }
+
+ private:
+ const RefPtr<nsIThreadInternal> mThread;
+ const nsCOMPtr<nsIDirectTaskDispatcher> mDirectTaskDispatcher;
+ std::unique_ptr<AutoTaskDispatcher> mTailDispatcher;
+ const bool mOnThread;
+
+ ~XPCOMThreadWrapper() {
+ if (mOnThread) {
+ MOZ_DIAGNOSTIC_ASSERT(IsCurrentThreadIn(),
+                            "Must be destroyed on the thread it was created on");
+ sCurrentThreadTLS.set(nullptr);
+ }
+ }
+
+ void MaybeFireTailDispatcher() {
+ if (mTailDispatcher) {
+ mTailDispatcher->DrainDirectTasks();
+ mThread->RemoveObserver(this);
+ mTailDispatcher.reset();
+ }
+ }
+
+ class Runner : public Runnable {
+ public:
+ explicit Runner(XPCOMThreadWrapper* aThread,
+ already_AddRefed<nsIRunnable> aRunnable)
+ : Runnable("XPCOMThreadWrapper::Runner"),
+ mThread(aThread),
+ mRunnable(aRunnable) {}
+
+ NS_IMETHOD Run() override {
+ MOZ_ASSERT(mThread == AbstractThread::GetCurrent());
+ MOZ_ASSERT(mThread->IsCurrentThreadIn());
+ SerialEventTargetGuard guard(mThread);
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(mRunnable);
+ return mRunnable->Run();
+ }
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHOD GetName(nsACString& aName) override {
+ aName.AssignLiteral("AbstractThread::Runner");
+ if (nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable)) {
+ nsAutoCString name;
+ named->GetName(name);
+ if (!name.IsEmpty()) {
+ aName.AppendLiteral(" for ");
+ aName.Append(name);
+ }
+ }
+ return NS_OK;
+ }
+#endif
+
+ private:
+ const RefPtr<XPCOMThreadWrapper> mThread;
+ const RefPtr<nsIRunnable> mRunnable;
+ };
+};
+
+NS_IMPL_ISUPPORTS(XPCOMThreadWrapper, nsIThreadObserver,
+ nsIDirectTaskDispatcher, nsISerialEventTarget, nsIEventTarget)
+
+NS_IMETHODIMP_(bool)
+AbstractThread::IsOnCurrentThreadInfallible() { return IsCurrentThreadIn(); }
+
+NS_IMETHODIMP
+AbstractThread::IsOnCurrentThread(bool* aResult) {
+ *aResult = IsCurrentThreadIn();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+AbstractThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+AbstractThread::Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) {
+ return Dispatch(std::move(aEvent), NormalDispatch);
+}
+
+NS_IMETHODIMP
+AbstractThread::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aDelayMs) {
+ nsCOMPtr<nsIRunnable> event = aEvent;
+ NS_ENSURE_TRUE(!!aDelayMs, NS_ERROR_UNEXPECTED);
+
+ RefPtr<DelayedRunnable> r =
+ new DelayedRunnable(do_AddRef(this), event.forget(), aDelayMs);
+ nsresult rv = r->Init();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return Dispatch(r.forget(), NS_DISPATCH_NORMAL);
+}
+
+nsresult AbstractThread::TailDispatchTasksFor(AbstractThread* aThread) {
+ if (MightHaveTailTasks()) {
+ return TailDispatcher().DispatchTasksFor(aThread);
+ }
+
+ return NS_OK;
+}
+
+bool AbstractThread::HasTailTasksFor(AbstractThread* aThread) {
+ if (!MightHaveTailTasks()) {
+ return false;
+ }
+ return TailDispatcher().HasTasksFor(aThread);
+}
+
+bool AbstractThread::RequiresTailDispatch(AbstractThread* aThread) const {
+ MOZ_ASSERT(aThread);
+ // We require tail dispatch if both the source and destination
+ // threads support it.
+ return SupportsTailDispatch() && aThread->SupportsTailDispatch();
+}
+
+bool AbstractThread::RequiresTailDispatchFromCurrentThread() const {
+ AbstractThread* current = GetCurrent();
+ return current && RequiresTailDispatch(current);
+}
+
+AbstractThread* AbstractThread::MainThread() {
+ MOZ_ASSERT(sMainThread);
+ return sMainThread;
+}
+
+void AbstractThread::InitTLS() {
+ if (!sCurrentThreadTLS.init()) {
+ MOZ_CRASH();
+ }
+}
+
+void AbstractThread::InitMainThread() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!sMainThread);
+ nsCOMPtr<nsIThreadInternal> mainThread =
+ do_QueryInterface(nsThreadManager::get().GetMainThreadWeak());
+ MOZ_DIAGNOSTIC_ASSERT(mainThread);
+
+ if (!sCurrentThreadTLS.init()) {
+ MOZ_CRASH();
+ }
+ sMainThread = new XPCOMThreadWrapper(mainThread.get(),
+ /* aRequireTailDispatch = */ true,
+ true /* onThread */);
+}
+
+void AbstractThread::ShutdownMainThread() {
+ MOZ_ASSERT(NS_IsMainThread());
+ sMainThread = nullptr;
+}
+
+void AbstractThread::DispatchStateChange(
+ already_AddRefed<nsIRunnable> aRunnable) {
+ AbstractThread* currentThread = GetCurrent();
+ MOZ_DIAGNOSTIC_ASSERT(currentThread, "An AbstractThread must exist");
+ if (currentThread->IsTailDispatcherAvailable()) {
+ currentThread->TailDispatcher().AddStateChangeTask(this,
+ std::move(aRunnable));
+ } else {
+ // If the tail dispatcher isn't available, we just avoid sending state
+ // updates.
+ //
+ // This happens, specifically (1) During async shutdown (via the media
+ // shutdown blocker), and (2) During the final shutdown cycle collection.
+ // Both of these trigger changes to various watched and mirrored state.
+ nsCOMPtr<nsIRunnable> neverDispatched = aRunnable;
+ }
+}
+
+/* static */
+void AbstractThread::DispatchDirectTask(
+ already_AddRefed<nsIRunnable> aRunnable) {
+ AbstractThread* currentThread = GetCurrent();
+ MOZ_DIAGNOSTIC_ASSERT(currentThread, "An AbstractThread must exist");
+ if (currentThread->IsTailDispatcherAvailable()) {
+ currentThread->TailDispatcher().AddDirectTask(std::move(aRunnable));
+ } else {
+ // If the tail dispatcher isn't available, we post as a regular task.
+ currentThread->Dispatch(std::move(aRunnable));
+ }
+}
+
+} // namespace mozilla
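A minimal sketch of how client code might use the two dispatch flavors implemented above; NS_NewRunnableFunction comes from nsThreadUtils.h, and the function and task names here are illustrative:

    #include "mozilla/AbstractThread.h"
    #include "nsThreadUtils.h"

    // Assumes XPCOM is up and AbstractThread::InitMainThread() has run,
    // as happens early in Gecko startup.
    void PostExampleTasks() {
      // Ordinary dispatch: the runnable is queued on the main-thread event
      // loop (wrapped in XPCOMThreadWrapper::Runner above).
      mozilla::AbstractThread::MainThread()->Dispatch(NS_NewRunnableFunction(
          "ExampleTask", [] { /* runs from the event loop */ }));

      // Direct task: runs when the current runnable unwinds, before the next
      // ordinary event -- roughly analogous to a JS microtask.
      mozilla::AbstractThread::DispatchDirectTask(NS_NewRunnableFunction(
          "ExampleDirectTask", [] { /* runs at the current event's tail */ }));
    }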
diff --git a/xpcom/threads/AbstractThread.h b/xpcom/threads/AbstractThread.h
new file mode 100644
index 0000000000..b53bcf8ca3
--- /dev/null
+++ b/xpcom/threads/AbstractThread.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(AbstractThread_h_)
+# define AbstractThread_h_
+
+# include "mozilla/AlreadyAddRefed.h"
+# include "mozilla/ThreadLocal.h"
+# include "nscore.h"
+# include "nsISerialEventTarget.h"
+# include "nsISupports.h"
+
+class nsIEventTarget;
+class nsIRunnable;
+class nsIThread;
+
+namespace mozilla {
+
+class TaskDispatcher;
+
+/*
+ * We often want to run tasks on a target that guarantees that events will never
+ * run in parallel. There are various target types that achieve this - namely
+ * nsIThread and TaskQueue. Note that nsIThreadPool (which implements
+ * nsIEventTarget) does not have this property, so we do not want to use
+ * nsIEventTarget for this purpose. This class encapsulates the specifics of
+ * the structures we might use here and provides a consistent interface.
+ *
+ * At present, the supported AbstractThread implementations are TaskQueue,
+ * AbstractThread::MainThread() and XPCOMThreadWrapper which can wrap any
+ * nsThread.
+ *
+ * The primary use of XPCOMThreadWrapper is to allow any thread to provide
+ * Direct Task dispatching, which is similar (but not identical) to the
+ * microtask semantics of JS promises. Instantiating an XPCOMThreadWrapper on
+ * the current nsThread is sufficient to enable direct task dispatching.
+ *
+ * You shouldn't compare AbstractThread or nsIThread pointers to determine
+ * whether you are currently on a given thread; use the
+ * nsISerialEventTarget::IsOnCurrentThread() method instead.
+ */
+class AbstractThread : public nsISerialEventTarget {
+ public:
+ // Returns the AbstractThread that the caller is currently running in, or null
+ // if the caller is not running in an AbstractThread.
+ static AbstractThread* GetCurrent() { return sCurrentThreadTLS.get(); }
+
+ AbstractThread(bool aSupportsTailDispatch)
+ : mSupportsTailDispatch(aSupportsTailDispatch) {}
+
+ // We don't use NS_DECL_NSIEVENTTARGET so that we can remove the default
+ // |flags| parameter from Dispatch. Otherwise, a single-argument Dispatch call
+ // would be ambiguous.
+ using nsISerialEventTarget::IsOnCurrentThread;
+ NS_IMETHOD_(bool) IsOnCurrentThreadInfallible(void) override;
+ NS_IMETHOD IsOnCurrentThread(bool* _retval) override;
+ NS_IMETHOD Dispatch(already_AddRefed<nsIRunnable> event,
+ uint32_t flags) override;
+ NS_IMETHOD DispatchFromScript(nsIRunnable* event, uint32_t flags) override;
+ NS_IMETHOD DelayedDispatch(already_AddRefed<nsIRunnable> event,
+ uint32_t delay) override;
+
+ enum DispatchReason { NormalDispatch, TailDispatch };
+ virtual nsresult Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ DispatchReason aReason = NormalDispatch) = 0;
+
+ virtual bool IsCurrentThreadIn() const = 0;
+
+  // Returns a TaskDispatcher that will dispatch its tasks when the currently-
+  // running task pops off the stack.
+  //
+  // May only be called while running on this AbstractThread, and only on
+  // threads which support tail dispatch.
+ virtual TaskDispatcher& TailDispatcher() = 0;
+
+ // Returns true if we have tail tasks scheduled, or if this isn't known.
+ // Returns false if we definitely don't have any tail tasks.
+ virtual bool MightHaveTailTasks() { return true; }
+
+ // Returns true if the tail dispatcher is available. In certain edge cases
+ // like shutdown, it might not be.
+ virtual bool IsTailDispatcherAvailable() { return true; }
+
+  // Helper functions for methods on the tail TaskDispatcher. These check
+  // MightHaveTailTasks() to avoid allocating a TailDispatcher if it isn't
+  // needed.
+ nsresult TailDispatchTasksFor(AbstractThread* aThread);
+ bool HasTailTasksFor(AbstractThread* aThread);
+
+ // Returns true if this supports the tail dispatcher.
+ bool SupportsTailDispatch() const { return mSupportsTailDispatch; }
+
+  // Returns true if this thread requires that all dispatches originating from
+  // aThread go through the tail dispatcher.
+ bool RequiresTailDispatch(AbstractThread* aThread) const;
+ bool RequiresTailDispatchFromCurrentThread() const;
+
+ virtual nsIEventTarget* AsEventTarget() { MOZ_CRASH("Not an event target!"); }
+
+ // Returns the non-DocGroup version of AbstractThread on the main thread.
+ // A DocGroup-versioned one is available in
+ // DispatcherTrait::AbstractThreadFor(). Note:
+ // DispatcherTrait::AbstractThreadFor() SHALL be used when possible.
+ static AbstractThread* MainThread();
+
+ // Must be called exactly once during startup.
+ static void InitTLS();
+ static void InitMainThread();
+ static void ShutdownMainThread();
+
+ void DispatchStateChange(already_AddRefed<nsIRunnable> aRunnable);
+
+ static void DispatchDirectTask(already_AddRefed<nsIRunnable> aRunnable);
+
+ protected:
+ virtual ~AbstractThread() = default;
+ static MOZ_THREAD_LOCAL(AbstractThread*) sCurrentThreadTLS;
+
+ // True if we want to require that every task dispatched from tasks running in
+ // this queue go through our queue's tail dispatcher.
+ const bool mSupportsTailDispatch;
+};
+
+} // namespace mozilla
+
+#endif
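As the comment at the top of this header recommends, thread affinity should be checked with IsOnCurrentThread() rather than pointer comparison; a brief sketch with a hypothetical class:

    #include "mozilla/AbstractThread.h"
    #include "mozilla/Assertions.h"
    #include "mozilla/RefPtr.h"

    // ExampleCache is hypothetical; it pins its state to one AbstractThread.
    class ExampleCache {
     public:
      explicit ExampleCache(mozilla::AbstractThread* aThread)
          : mThread(aThread) {}

      void Update() {
        // Compare via IsOnCurrentThread(), not by pointer identity.
        MOZ_ASSERT(mThread->IsOnCurrentThread());
        // ... touch state that must only be used on mThread ...
      }

     private:
      const RefPtr<mozilla::AbstractThread> mThread;
    };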
diff --git a/xpcom/threads/BlockingResourceBase.cpp b/xpcom/threads/BlockingResourceBase.cpp
new file mode 100644
index 0000000000..c2ba82e07a
--- /dev/null
+++ b/xpcom/threads/BlockingResourceBase.cpp
@@ -0,0 +1,547 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/BlockingResourceBase.h"
+
+#ifdef DEBUG
+# include "prthread.h"
+
+# ifndef MOZ_CALLSTACK_DISABLED
+# include "CodeAddressService.h"
+# include "nsHashKeys.h"
+# include "mozilla/StackWalk.h"
+# include "nsTHashtable.h"
+# endif
+
+# include "mozilla/Attributes.h"
+# include "mozilla/CondVar.h"
+# include "mozilla/DeadlockDetector.h"
+# include "mozilla/RecursiveMutex.h"
+# include "mozilla/ReentrantMonitor.h"
+# include "mozilla/Mutex.h"
+# include "mozilla/RWLock.h"
+# include "mozilla/UniquePtr.h"
+
+# if defined(MOZILLA_INTERNAL_API)
+# include "mozilla/ProfilerThreadSleep.h"
+# endif // MOZILLA_INTERNAL_API
+
+#endif // ifdef DEBUG
+
+namespace mozilla {
+//
+// BlockingResourceBase implementation
+//
+
+// static members
+const char* const BlockingResourceBase::kResourceTypeName[] = {
+ // needs to be kept in sync with BlockingResourceType
+ "Mutex", "ReentrantMonitor", "CondVar", "RecursiveMutex"};
+
+#ifdef DEBUG
+
+PRCallOnceType BlockingResourceBase::sCallOnce;
+MOZ_THREAD_LOCAL(BlockingResourceBase*)
+BlockingResourceBase::sResourceAcqnChainFront;
+BlockingResourceBase::DDT* BlockingResourceBase::sDeadlockDetector;
+
+void BlockingResourceBase::StackWalkCallback(uint32_t aFrameNumber, void* aPc,
+ void* aSp, void* aClosure) {
+# ifndef MOZ_CALLSTACK_DISABLED
+ AcquisitionState* state = (AcquisitionState*)aClosure;
+ state->ref().AppendElement(aPc);
+# endif
+}
+
+void BlockingResourceBase::GetStackTrace(AcquisitionState& aState,
+ const void* aFirstFramePC) {
+# ifndef MOZ_CALLSTACK_DISABLED
+ // Clear the array...
+ aState.reset();
+ // ...and create a new one; this also puts the state to 'acquired' status
+ // regardless of whether we obtain a stack trace or not.
+ aState.emplace();
+
+ MozStackWalk(StackWalkCallback, aFirstFramePC, kAcquisitionStateStackSize,
+ aState.ptr());
+# endif
+}
+
+/**
+ * PrintCycle
+ * Append to |aOut| detailed information about the circular
+ * dependency in |aCycle|. Returns true if it *appears* that this
+ * cycle may represent an imminent deadlock, but this is merely a
+ * heuristic; the value returned may be a false positive or false
+ * negative.
+ *
+ * *NOT* thread safe. Calls |Print()|.
+ *
+ * FIXME bug 456272 hack alert: because we can't write call
+ * contexts into strings, all info is written to stderr, but only
+ * some info is written into |aOut|
+ */
+static bool PrintCycle(
+ const BlockingResourceBase::DDT::ResourceAcquisitionArray& aCycle,
+ nsACString& aOut) {
+ NS_ASSERTION(aCycle.Length() > 1, "need > 1 element for cycle!");
+
+ bool maybeImminent = true;
+
+ fputs("=== Cyclical dependency starts at\n", stderr);
+ aOut += "Cyclical dependency starts at\n";
+
+ const BlockingResourceBase::DDT::ResourceAcquisitionArray::value_type res =
+ aCycle.ElementAt(0);
+ maybeImminent &= res->Print(aOut);
+
+ BlockingResourceBase::DDT::ResourceAcquisitionArray::index_type i;
+ BlockingResourceBase::DDT::ResourceAcquisitionArray::size_type len =
+ aCycle.Length();
+ const BlockingResourceBase::DDT::ResourceAcquisitionArray::value_type* it =
+ 1 + aCycle.Elements();
+ for (i = 1; i < len - 1; ++i, ++it) {
+ fputs("\n--- Next dependency:\n", stderr);
+ aOut += "\nNext dependency:\n";
+
+ maybeImminent &= (*it)->Print(aOut);
+ }
+
+ fputs("\n=== Cycle completed at\n", stderr);
+ aOut += "Cycle completed at\n";
+ (*it)->Print(aOut);
+
+ return maybeImminent;
+}
+
+bool BlockingResourceBase::Print(nsACString& aOut) const {
+ fprintf(stderr, "--- %s : %s", kResourceTypeName[mType], mName);
+ aOut += BlockingResourceBase::kResourceTypeName[mType];
+ aOut += " : ";
+ aOut += mName;
+
+ bool acquired = IsAcquired();
+
+ if (acquired) {
+ fputs(" (currently acquired)\n", stderr);
+ aOut += " (currently acquired)\n";
+ }
+
+ fputs(" calling context\n", stderr);
+# ifdef MOZ_CALLSTACK_DISABLED
+ fputs(" [stack trace unavailable]\n", stderr);
+# else
+ const AcquisitionState& state = acquired ? mAcquired : mFirstSeen;
+
+ CodeAddressService<> addressService;
+
+ for (uint32_t i = 0; i < state.ref().Length(); i++) {
+ const size_t kMaxLength = 1024;
+ char buffer[kMaxLength];
+ addressService.GetLocation(i + 1, state.ref()[i], buffer, kMaxLength);
+ const char* fmt = " %s\n";
+ aOut.AppendLiteral(" ");
+ aOut.Append(buffer);
+ aOut.AppendLiteral("\n");
+ fprintf(stderr, fmt, buffer);
+ }
+
+# endif
+
+ return acquired;
+}
+
+BlockingResourceBase::BlockingResourceBase(
+ const char* aName, BlockingResourceBase::BlockingResourceType aType)
+ : mName(aName),
+ mType(aType)
+# ifdef MOZ_CALLSTACK_DISABLED
+ ,
+ mAcquired(false)
+# else
+ ,
+ mAcquired()
+# endif
+{
+ MOZ_ASSERT(mName, "Name must be nonnull");
+  // PR_CallOnce guarantees that InitStatics is called in a
+ // thread-safe way
+ if (PR_SUCCESS != PR_CallOnce(&sCallOnce, InitStatics)) {
+ MOZ_CRASH("can't initialize blocking resource static members");
+ }
+
+ mChainPrev = 0;
+ sDeadlockDetector->Add(this);
+}
+
+BlockingResourceBase::~BlockingResourceBase() {
+ // we don't check for really obviously bad things like freeing
+ // Mutexes while they're still locked. it is assumed that the
+ // base class, or its underlying primitive, will check for such
+ // stupid mistakes.
+ mChainPrev = 0; // racy only for stupidly buggy client code
+ if (sDeadlockDetector) {
+ sDeadlockDetector->Remove(this);
+ }
+}
+
+size_t BlockingResourceBase::SizeOfDeadlockDetector(
+ MallocSizeOf aMallocSizeOf) {
+ return sDeadlockDetector
+ ? sDeadlockDetector->SizeOfIncludingThis(aMallocSizeOf)
+ : 0;
+}
+
+PRStatus BlockingResourceBase::InitStatics() {
+ MOZ_ASSERT(sResourceAcqnChainFront.init());
+ sDeadlockDetector = new DDT();
+ if (!sDeadlockDetector) {
+ MOZ_CRASH("can't allocate deadlock detector");
+ }
+ return PR_SUCCESS;
+}
+
+void BlockingResourceBase::Shutdown() {
+ delete sDeadlockDetector;
+ sDeadlockDetector = 0;
+}
+
+MOZ_NEVER_INLINE void BlockingResourceBase::CheckAcquire() {
+ if (mType == eCondVar) {
+ MOZ_ASSERT_UNREACHABLE(
+ "FIXME bug 456272: annots. to allow CheckAcquire()ing condvars");
+ return;
+ }
+
+ BlockingResourceBase* chainFront = ResourceChainFront();
+ mozilla::UniquePtr<DDT::ResourceAcquisitionArray> cycle(
+ sDeadlockDetector->CheckAcquisition(chainFront ? chainFront : 0, this));
+ if (!cycle) {
+ return;
+ }
+
+# ifndef MOZ_CALLSTACK_DISABLED
+ // Update the current stack before printing.
+ GetStackTrace(mAcquired, CallerPC());
+# endif
+
+ fputs("###!!! ERROR: Potential deadlock detected:\n", stderr);
+ nsAutoCString out("Potential deadlock detected:\n");
+ bool maybeImminent = PrintCycle(*cycle, out);
+
+ if (maybeImminent) {
+ fputs("\n###!!! Deadlock may happen NOW!\n\n", stderr);
+ out.AppendLiteral("\n###!!! Deadlock may happen NOW!\n\n");
+ } else {
+ fputs("\nDeadlock may happen for some other execution\n\n", stderr);
+ out.AppendLiteral("\nDeadlock may happen for some other execution\n\n");
+ }
+
+ // Only error out if we think a deadlock is imminent.
+ if (maybeImminent) {
+ NS_ERROR(out.get());
+ } else {
+ NS_WARNING(out.get());
+ }
+}
+
+MOZ_NEVER_INLINE void BlockingResourceBase::Acquire() {
+ if (mType == eCondVar) {
+ MOZ_ASSERT_UNREACHABLE(
+ "FIXME bug 456272: annots. to allow Acquire()ing condvars");
+ return;
+ }
+ NS_ASSERTION(!IsAcquired(), "reacquiring already acquired resource");
+
+ ResourceChainAppend(ResourceChainFront());
+
+# ifdef MOZ_CALLSTACK_DISABLED
+ mAcquired = true;
+# else
+ // Take a stack snapshot.
+ GetStackTrace(mAcquired, CallerPC());
+ MOZ_ASSERT(IsAcquired());
+
+ if (!mFirstSeen) {
+ mFirstSeen = mAcquired.map(
+ [](AcquisitionState::ValueType& state) { return state.Clone(); });
+ }
+# endif
+}
+
+void BlockingResourceBase::Release() {
+ if (mType == eCondVar) {
+ MOZ_ASSERT_UNREACHABLE(
+ "FIXME bug 456272: annots. to allow Release()ing condvars");
+ return;
+ }
+
+ BlockingResourceBase* chainFront = ResourceChainFront();
+ NS_ASSERTION(chainFront && IsAcquired(),
+ "Release()ing something that hasn't been Acquire()ed");
+
+ if (chainFront == this) {
+ ResourceChainRemove();
+ } else {
+ // remove this resource from wherever it lives in the chain
+ // we walk backwards in order of acquisition:
+ // (1) ...node<-prev<-curr...
+ // / /
+ // (2) ...prev<-curr...
+ BlockingResourceBase* curr = chainFront;
+ BlockingResourceBase* prev = nullptr;
+ while (curr && (prev = curr->mChainPrev) && (prev != this)) {
+ curr = prev;
+ }
+ if (prev == this) {
+ curr->mChainPrev = prev->mChainPrev;
+ }
+ }
+
+ ClearAcquisitionState();
+}
+
+//
+// Debug implementation of (OffTheBooks)Mutex
+void OffTheBooksMutex::Lock() {
+ CheckAcquire();
+ this->lock();
+ mOwningThread = PR_GetCurrentThread();
+ Acquire();
+}
+
+bool OffTheBooksMutex::TryLock() {
+ bool locked = this->tryLock();
+ if (locked) {
+ mOwningThread = PR_GetCurrentThread();
+ Acquire();
+ }
+ return locked;
+}
+
+void OffTheBooksMutex::Unlock() {
+ Release();
+ mOwningThread = nullptr;
+ this->unlock();
+}
+
+void OffTheBooksMutex::AssertCurrentThreadOwns() const {
+ MOZ_ASSERT(IsAcquired() && mOwningThread == PR_GetCurrentThread());
+}
+
+//
+// Debug implementation of RWLock
+//
+
+bool RWLock::TryReadLock() {
+ bool locked = this->detail::RWLockImpl::tryReadLock();
+ MOZ_ASSERT_IF(locked, mOwningThread == nullptr);
+ return locked;
+}
+
+void RWLock::ReadLock() {
+ // All we want to ensure here is that we're not attempting to acquire the
+ // read lock while this thread is holding the write lock.
+ CheckAcquire();
+ this->detail::RWLockImpl::readLock();
+ MOZ_ASSERT(mOwningThread == nullptr);
+}
+
+void RWLock::ReadUnlock() {
+ MOZ_ASSERT(mOwningThread == nullptr);
+ this->detail::RWLockImpl::readUnlock();
+}
+
+bool RWLock::TryWriteLock() {
+ bool locked = this->detail::RWLockImpl::tryWriteLock();
+ if (locked) {
+ mOwningThread = PR_GetCurrentThread();
+ Acquire();
+ }
+ return locked;
+}
+
+void RWLock::WriteLock() {
+ CheckAcquire();
+ this->detail::RWLockImpl::writeLock();
+ mOwningThread = PR_GetCurrentThread();
+ Acquire();
+}
+
+void RWLock::WriteUnlock() {
+ Release();
+ mOwningThread = nullptr;
+ this->detail::RWLockImpl::writeUnlock();
+}
+
+//
+// Debug implementation of ReentrantMonitor
+void ReentrantMonitor::Enter() {
+ BlockingResourceBase* chainFront = ResourceChainFront();
+
+ // the code below implements monitor reentrancy semantics
+
+ if (this == chainFront) {
+ // immediately re-entered the monitor: acceptable
+ PR_EnterMonitor(mReentrantMonitor);
+ ++mEntryCount;
+ return;
+ }
+
+ // this is sort of a hack around not recording the thread that
+ // owns this monitor
+ if (chainFront) {
+ for (BlockingResourceBase* br = ResourceChainPrev(chainFront); br;
+ br = ResourceChainPrev(br)) {
+ if (br == this) {
+ NS_WARNING(
+ "Re-entering ReentrantMonitor after acquiring other resources.");
+
+ // show the caller why this is potentially bad
+ CheckAcquire();
+
+ PR_EnterMonitor(mReentrantMonitor);
+ ++mEntryCount;
+ return;
+ }
+ }
+ }
+
+ CheckAcquire();
+ PR_EnterMonitor(mReentrantMonitor);
+ NS_ASSERTION(mEntryCount == 0, "ReentrantMonitor isn't free!");
+ Acquire(); // protected by mReentrantMonitor
+ mEntryCount = 1;
+}
+
+void ReentrantMonitor::Exit() {
+ if (--mEntryCount == 0) {
+ Release(); // protected by mReentrantMonitor
+ }
+ PRStatus status = PR_ExitMonitor(mReentrantMonitor);
+ NS_ASSERTION(PR_SUCCESS == status, "bad ReentrantMonitor::Exit()");
+}
+
+nsresult ReentrantMonitor::Wait(PRIntervalTime aInterval) {
+ AssertCurrentThreadIn();
+
+ // save monitor state and reset it to empty
+ int32_t savedEntryCount = mEntryCount;
+ AcquisitionState savedAcquisitionState = TakeAcquisitionState();
+ BlockingResourceBase* savedChainPrev = mChainPrev;
+ mEntryCount = 0;
+ mChainPrev = 0;
+
+ nsresult rv;
+ {
+# if defined(MOZILLA_INTERNAL_API)
+ AUTO_PROFILER_THREAD_SLEEP;
+# endif
+ // give up the monitor until we're back from Wait()
+ rv = PR_Wait(mReentrantMonitor, aInterval) == PR_SUCCESS ? NS_OK
+ : NS_ERROR_FAILURE;
+ }
+
+ // restore saved state
+ mEntryCount = savedEntryCount;
+ SetAcquisitionState(std::move(savedAcquisitionState));
+ mChainPrev = savedChainPrev;
+
+ return rv;
+}
+
+//
+// Debug implementation of RecursiveMutex
+void RecursiveMutex::Lock() {
+ BlockingResourceBase* chainFront = ResourceChainFront();
+
+ // the code below implements mutex reentrancy semantics
+
+ if (this == chainFront) {
+ // immediately re-entered the mutex: acceptable
+ LockInternal();
+ ++mEntryCount;
+ return;
+ }
+
+ // this is sort of a hack around not recording the thread that
+ // owns this monitor
+ if (chainFront) {
+ for (BlockingResourceBase* br = ResourceChainPrev(chainFront); br;
+ br = ResourceChainPrev(br)) {
+ if (br == this) {
+ NS_WARNING(
+ "Re-entering RecursiveMutex after acquiring other resources.");
+
+ // show the caller why this is potentially bad
+ CheckAcquire();
+
+ LockInternal();
+ ++mEntryCount;
+ return;
+ }
+ }
+ }
+
+ CheckAcquire();
+ LockInternal();
+ NS_ASSERTION(mEntryCount == 0, "RecursiveMutex isn't free!");
+ Acquire(); // protected by us
+ mOwningThread = PR_GetCurrentThread();
+ mEntryCount = 1;
+}
+
+void RecursiveMutex::Unlock() {
+ if (--mEntryCount == 0) {
+ Release(); // protected by us
+ mOwningThread = nullptr;
+ }
+ UnlockInternal();
+}
+
+void RecursiveMutex::AssertCurrentThreadIn() const {
+ MOZ_ASSERT(IsAcquired() && mOwningThread == PR_GetCurrentThread());
+}
+
+//
+// Debug implementation of CondVar
+void OffTheBooksCondVar::Wait() {
+ // Forward to the timed version of OffTheBooksCondVar::Wait to avoid code
+ // duplication.
+ CVStatus status = Wait(TimeDuration::Forever());
+ MOZ_ASSERT(status == CVStatus::NoTimeout);
+}
+
+CVStatus OffTheBooksCondVar::Wait(TimeDuration aDuration) {
+ AssertCurrentThreadOwnsMutex();
+
+ // save mutex state and reset to empty
+ AcquisitionState savedAcquisitionState = mLock->TakeAcquisitionState();
+ BlockingResourceBase* savedChainPrev = mLock->mChainPrev;
+ PRThread* savedOwningThread = mLock->mOwningThread;
+ mLock->mChainPrev = 0;
+ mLock->mOwningThread = nullptr;
+
+ // give up mutex until we're back from Wait()
+ CVStatus status;
+ {
+# if defined(MOZILLA_INTERNAL_API)
+ AUTO_PROFILER_THREAD_SLEEP;
+# endif
+ status = mImpl.wait_for(*mLock, aDuration);
+ }
+
+ // restore saved state
+ mLock->SetAcquisitionState(std::move(savedAcquisitionState));
+ mLock->mChainPrev = savedChainPrev;
+ mLock->mOwningThread = savedOwningThread;
+
+ return status;
+}
+
+#endif // ifdef DEBUG
+
+} // namespace mozilla
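The CheckAcquire() machinery above exists to catch lock-order inversions before they become deadlocks; a minimal sketch of the pattern it reports in DEBUG builds, with illustrative names:

    #include "mozilla/Mutex.h"

    mozilla::Mutex gMutexA("A");
    mozilla::Mutex gMutexB("B");

    void ThreadOne() {
      mozilla::MutexAutoLock lockA(gMutexA);
      mozilla::MutexAutoLock lockB(gMutexB);  // records the order A -> B
    }

    void ThreadTwo() {
      mozilla::MutexAutoLock lockB(gMutexB);
      // The deadlock detector sees B -> A against the recorded A -> B and
      // prints "Potential deadlock detected:" with both acquisition contexts.
      mozilla::MutexAutoLock lockA(gMutexA);
    }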
diff --git a/xpcom/threads/BlockingResourceBase.h b/xpcom/threads/BlockingResourceBase.h
new file mode 100644
index 0000000000..8bb7a78f6f
--- /dev/null
+++ b/xpcom/threads/BlockingResourceBase.h
@@ -0,0 +1,339 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BlockingResourceBase_h
+#define mozilla_BlockingResourceBase_h
+
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/ThreadLocal.h"
+#include "mozilla/Attributes.h"
+
+#include "nscore.h"
+#include "nsDebug.h"
+
+#include "prtypes.h"
+
+#ifdef DEBUG
+
+// NB: Comment this out to enable callstack tracking.
+# define MOZ_CALLSTACK_DISABLED
+
+# include "prinit.h"
+
+# ifndef MOZ_CALLSTACK_DISABLED
+# include "mozilla/Maybe.h"
+# include "nsTArray.h"
+# endif
+
+#endif
+
+//
+// This header is not meant to be included by client code.
+//
+
+namespace mozilla {
+
+#ifdef DEBUG
+template <class T>
+class DeadlockDetector;
+#endif
+
+/**
+ * BlockingResourceBase
+ * Base class of resources that might block clients trying to acquire them.
+ * Does debugging and deadlock detection in DEBUG builds.
+ **/
+class BlockingResourceBase {
+ public:
+ // Needs to be kept in sync with kResourceTypeNames.
+ enum BlockingResourceType {
+ eMutex,
+ eReentrantMonitor,
+ eCondVar,
+ eRecursiveMutex
+ };
+
+ /**
+ * kResourceTypeName
+ * Human-readable version of BlockingResourceType enum.
+ */
+ static const char* const kResourceTypeName[];
+
+#ifdef DEBUG
+
+ static size_t SizeOfDeadlockDetector(MallocSizeOf aMallocSizeOf);
+
+ /**
+ * Print
+ * Write a description of this blocking resource to |aOut|. If
+ * the resource appears to be currently acquired, the current
+ * acquisition context is printed and true is returned.
+ * Otherwise, we print the context from |aFirstSeen|, the
+ * first acquisition from which the code calling |Print()|
+ * became interested in us, and return false.
+ *
+ * *NOT* thread safe. Reads |mAcquisitionContext| without
+ * synchronization, but this will not cause correctness
+ * problems.
+ *
+ * FIXME bug 456272: hack alert: because we can't write call
+ * contexts into strings, all info is written to stderr, but
+ * only some info is written into |aOut|
+ */
+ bool Print(nsACString& aOut) const;
+
+ size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ // NB: |mName| is not reported as it's expected to be a static string.
+ // If we switch to a nsString it should be added to the tally.
+ // |mChainPrev| is not reported because its memory is not owned.
+ size_t n = aMallocSizeOf(this);
+ return n;
+ }
+
+ // ``DDT'' = ``Deadlock Detector Type''
+ typedef DeadlockDetector<BlockingResourceBase> DDT;
+
+ protected:
+# ifdef MOZ_CALLSTACK_DISABLED
+ typedef bool AcquisitionState;
+# else
+  // Using Maybe so that emplacement serves as the acquisition-state flag; we
+  // may not always get a stack trace because of possible stack walk
+  // suppression or errors, so !IsEmpty() on the array itself can't serve as
+  // the indication.
+ static size_t const kAcquisitionStateStackSize = 24;
+ typedef Maybe<AutoTArray<void*, kAcquisitionStateStackSize> >
+ AcquisitionState;
+# endif
+
+ /**
+ * BlockingResourceBase
+ * Initialize this blocking resource. Also hooks the resource into
+ * instrumentation code.
+ *
+ * Thread safe.
+ *
+ * @param aName A meaningful, unique name that can be used in
+ * error messages, et al.
+ * @param aType The specific type of |this|, if any.
+ **/
+ BlockingResourceBase(const char* aName, BlockingResourceType aType);
+
+ ~BlockingResourceBase();
+
+ /**
+ * CheckAcquire
+ *
+ * Thread safe.
+ **/
+ void CheckAcquire();
+
+ /**
+ * Acquire
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ **/
+ void Acquire(); // NS_NEEDS_RESOURCE(this)
+
+ /**
+ * Release
+ * Remove this resource from the current thread's acquisition chain.
+ * The resource does not have to be at the front of the chain, although
+ * it is confusing to release resources in a different order than they
+ * are acquired. This generates a warning.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ **/
+ void Release(); // NS_NEEDS_RESOURCE(this)
+
+ /**
+ * ResourceChainFront
+ *
+ * Thread safe.
+ *
+ * @return the front of the resource acquisition chain, i.e., the last
+ * resource acquired.
+ */
+ static BlockingResourceBase* ResourceChainFront() {
+ return sResourceAcqnChainFront.get();
+ }
+
+ /**
+ * ResourceChainPrev
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ static BlockingResourceBase* ResourceChainPrev(
+ const BlockingResourceBase* aResource) {
+ return aResource->mChainPrev;
+ } // NS_NEEDS_RESOURCE(this)
+
+ /**
+ * ResourceChainAppend
+ * Set |this| to the front of the resource acquisition chain, and link
+ * |this| to |aPrev|.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ void ResourceChainAppend(BlockingResourceBase* aPrev) {
+ mChainPrev = aPrev;
+ sResourceAcqnChainFront.set(this);
+ } // NS_NEEDS_RESOURCE(this)
+
+ /**
+ * ResourceChainRemove
+ * Remove |this| from the front of the resource acquisition chain.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ void ResourceChainRemove() {
+ NS_ASSERTION(this == ResourceChainFront(), "not at chain front");
+ sResourceAcqnChainFront.set(mChainPrev);
+ } // NS_NEEDS_RESOURCE(this)
+
+ /**
+ * TakeAcquisitionState
+ * Return whether or not this resource was acquired and mark the resource
+ * as not acquired for subsequent uses.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ AcquisitionState TakeAcquisitionState() {
+# ifdef MOZ_CALLSTACK_DISABLED
+ bool acquired = mAcquired;
+ ClearAcquisitionState();
+ return acquired;
+# else
+ return mAcquired.take();
+# endif
+ }
+
+ /**
+ * SetAcquisitionState
+ * Set whether or not this resource was acquired.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ void SetAcquisitionState(AcquisitionState&& aAcquisitionState) {
+ mAcquired = std::move(aAcquisitionState);
+ }
+
+ /**
+ * ClearAcquisitionState
+ * Indicate this resource is not acquired.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ void ClearAcquisitionState() {
+# ifdef MOZ_CALLSTACK_DISABLED
+ mAcquired = false;
+# else
+ mAcquired.reset();
+# endif
+ }
+
+ /**
+ * IsAcquired
+ * Indicates if this resource is acquired.
+ *
+ * *NOT* thread safe. Requires ownership of underlying resource.
+ */
+ bool IsAcquired() const { return (bool)mAcquired; }
+
+ /**
+ * mChainPrev
+ * A series of resource acquisitions creates a chain of orders. This
+ * chain is implemented as a linked list; |mChainPrev| points to the
+ * resource most recently Acquire()'d before this one.
+ **/
+ BlockingResourceBase* mChainPrev;
+
+ private:
+ /**
+ * mName
+ * A descriptive name for this resource. Used in error
+ * messages etc.
+ */
+ const char* mName;
+
+ /**
+ * mType
+ * The more specific type of this resource. Used to implement
+ * special semantics (e.g., reentrancy of monitors).
+ **/
+ BlockingResourceType mType;
+
+ /**
+ * mAcquired
+ * Indicates if this resource is currently acquired.
+ */
+ AcquisitionState mAcquired;
+
+# ifndef MOZ_CALLSTACK_DISABLED
+ /**
+ * mFirstSeen
+   * Indicates where this resource was first acquired.
+ */
+ AcquisitionState mFirstSeen;
+# endif
+
+ /**
+ * sCallOnce
+ * Ensures static members are initialized only once, and in a
+ * thread-safe way.
+ */
+ static PRCallOnceType sCallOnce;
+
+ /**
+ * Thread-private pointer to the front of each thread's resource
+ * acquisition chain.
+ */
+ static MOZ_THREAD_LOCAL(BlockingResourceBase*) sResourceAcqnChainFront;
+
+ /**
+ * sDeadlockDetector
+ * Does as named.
+ */
+ static DDT* sDeadlockDetector;
+
+ /**
+ * InitStatics
+   * Initialize static members of BlockingResourceBase that can't
+ * be statically initialized.
+ *
+ * *NOT* thread safe.
+ */
+ static PRStatus InitStatics();
+
+ /**
+ * Shutdown
+ * Free static members.
+ *
+ * *NOT* thread safe.
+ */
+ static void Shutdown();
+
+ static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
+ void* aClosure);
+ static void GetStackTrace(AcquisitionState& aState,
+ const void* aFirstFramePC);
+
+# ifdef MOZILLA_INTERNAL_API
+ // so it can call BlockingResourceBase::Shutdown()
+ friend void LogTerm();
+# endif // ifdef MOZILLA_INTERNAL_API
+
+#else // non-DEBUG implementation
+
+ BlockingResourceBase(const char* aName, BlockingResourceType aType) {}
+
+ ~BlockingResourceBase() {}
+
+#endif
+};
+
+} // namespace mozilla
+
+#endif // mozilla_BlockingResourceBase_h
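When callstack tracking is enabled, AcquisitionState is a Maybe<> around the frame array, so emplacement itself serves as the "acquired" flag; a standalone sketch of that idiom (names hypothetical):

    #include "mozilla/Assertions.h"
    #include "mozilla/Maybe.h"
    #include "nsTArray.h"

    using State = mozilla::Maybe<AutoTArray<void*, 24>>;

    void Demo() {
      State state;          // Nothing(): not acquired
      state.emplace();      // acquired, even if no frames are ever appended
      MOZ_ASSERT(state.isSome());
      State saved = state.take();  // what TakeAcquisitionState() does
      MOZ_ASSERT(state.isNothing() && saved.isSome());
    }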
diff --git a/xpcom/threads/CPUUsageWatcher.cpp b/xpcom/threads/CPUUsageWatcher.cpp
new file mode 100644
index 0000000000..922ca81e8d
--- /dev/null
+++ b/xpcom/threads/CPUUsageWatcher.cpp
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/CPUUsageWatcher.h"
+
+#include "prsystem.h"
+
+#ifdef XP_MACOSX
+# include <sys/resource.h>
+# include <mach/clock.h>
+# include <mach/mach_host.h>
+#endif
+
+#ifdef CPU_USAGE_WATCHER_ACTIVE
+# include "mozilla/BackgroundHangMonitor.h"
+#endif
+
+namespace mozilla {
+
+#ifdef CPU_USAGE_WATCHER_ACTIVE
+
+// Even if the machine only has one processor, tolerate up to 50%
+// external CPU usage.
+static const float kTolerableExternalCPUUsageFloor = 0.5f;
+
+struct CPUStats {
+ // The average CPU usage time, which can be summed across all cores in the
+ // system, or averaged between them. Whichever it is, it needs to be in the
+ // same units as updateTime.
+ uint64_t usageTime;
+ // A monotonically increasing value in the same units as usageTime, which can
+ // be used to determine the percentage of active vs idle time
+ uint64_t updateTime;
+};
+
+# ifdef XP_MACOSX
+
+static const uint64_t kMicrosecondsPerSecond = 1000000LL;
+static const uint64_t kNanosecondsPerMicrosecond = 1000LL;
+
+static uint64_t GetMicroseconds(timeval time) {
+ return ((uint64_t)time.tv_sec) * kMicrosecondsPerSecond +
+ (uint64_t)time.tv_usec;
+}
+
+static uint64_t GetMicroseconds(mach_timespec_t time) {
+ return ((uint64_t)time.tv_sec) * kMicrosecondsPerSecond +
+ ((uint64_t)time.tv_nsec) / kNanosecondsPerMicrosecond;
+}
+
+static Result<CPUStats, CPUUsageWatcherError> GetProcessCPUStats(
+ int32_t numCPUs) {
+ CPUStats result = {};
+ rusage usage;
+ int32_t rusageResult = getrusage(RUSAGE_SELF, &usage);
+ if (rusageResult == -1) {
+ return Err(GetProcessTimesError);
+ }
+ result.usageTime =
+ GetMicroseconds(usage.ru_utime) + GetMicroseconds(usage.ru_stime);
+
+ clock_serv_t realtimeClock;
+ kern_return_t errorResult =
+ host_get_clock_service(mach_host_self(), REALTIME_CLOCK, &realtimeClock);
+ if (errorResult != KERN_SUCCESS) {
+ return Err(GetProcessTimesError);
+ }
+ mach_timespec_t time;
+ errorResult = clock_get_time(realtimeClock, &time);
+ if (errorResult != KERN_SUCCESS) {
+ return Err(GetProcessTimesError);
+ }
+ result.updateTime = GetMicroseconds(time);
+
+ // getrusage will give us the sum of the values across all
+ // of our cores. Divide by the number of CPUs to get an average.
+ result.usageTime /= numCPUs;
+ return result;
+}
+
+static Result<CPUStats, CPUUsageWatcherError> GetGlobalCPUStats() {
+ CPUStats result = {};
+ host_cpu_load_info_data_t loadInfo;
+ mach_msg_type_number_t loadInfoCount = HOST_CPU_LOAD_INFO_COUNT;
+ kern_return_t statsResult =
+ host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO,
+ (host_info_t)&loadInfo, &loadInfoCount);
+ if (statsResult != KERN_SUCCESS) {
+ return Err(HostStatisticsError);
+ }
+
+ result.usageTime = loadInfo.cpu_ticks[CPU_STATE_USER] +
+ loadInfo.cpu_ticks[CPU_STATE_NICE] +
+ loadInfo.cpu_ticks[CPU_STATE_SYSTEM];
+ result.updateTime = result.usageTime + loadInfo.cpu_ticks[CPU_STATE_IDLE];
+ return result;
+}
+
+# endif // XP_MACOSX
+
+# ifdef XP_WIN
+
+// A FILETIME represents the number of 100-nanosecond ticks since 1/1/1601 UTC
+uint64_t FiletimeToInteger(FILETIME filetime) {
+ return ((uint64_t)filetime.dwLowDateTime) | (uint64_t)filetime.dwHighDateTime
+ << 32;
+}
+
+Result<CPUStats, CPUUsageWatcherError> GetProcessCPUStats(int32_t numCPUs) {
+ CPUStats result = {};
+ FILETIME creationFiletime;
+ FILETIME exitFiletime;
+ FILETIME kernelFiletime;
+ FILETIME userFiletime;
+ bool success = GetProcessTimes(GetCurrentProcess(), &creationFiletime,
+ &exitFiletime, &kernelFiletime, &userFiletime);
+ if (!success) {
+ return Err(GetProcessTimesError);
+ }
+
+ result.usageTime =
+ FiletimeToInteger(kernelFiletime) + FiletimeToInteger(userFiletime);
+
+ FILETIME nowFiletime;
+ GetSystemTimeAsFileTime(&nowFiletime);
+ result.updateTime = FiletimeToInteger(nowFiletime);
+
+ result.usageTime /= numCPUs;
+
+ return result;
+}
+
+Result<CPUStats, CPUUsageWatcherError> GetGlobalCPUStats() {
+ CPUStats result = {};
+ FILETIME idleFiletime;
+ FILETIME kernelFiletime;
+ FILETIME userFiletime;
+ bool success = GetSystemTimes(&idleFiletime, &kernelFiletime, &userFiletime);
+
+ if (!success) {
+ return Err(GetSystemTimesError);
+ }
+
+ result.usageTime =
+ FiletimeToInteger(kernelFiletime) + FiletimeToInteger(userFiletime);
+ result.updateTime = result.usageTime + FiletimeToInteger(idleFiletime);
+
+ return result;
+}
+
+# endif // XP_WIN
+
+Result<Ok, CPUUsageWatcherError> CPUUsageWatcher::Init() {
+ mNumCPUs = PR_GetNumberOfProcessors();
+ if (mNumCPUs <= 0) {
+ mExternalUsageThreshold = 1.0f;
+ return Err(GetNumberOfProcessorsError);
+ }
+ mExternalUsageThreshold =
+ std::max(1.0f - 1.0f / (float)mNumCPUs, kTolerableExternalCPUUsageFloor);
+
+ CPUStats processTimes;
+ MOZ_TRY_VAR(processTimes, GetProcessCPUStats(mNumCPUs));
+ mProcessUpdateTime = processTimes.updateTime;
+ mProcessUsageTime = processTimes.usageTime;
+
+ CPUStats globalTimes;
+ MOZ_TRY_VAR(globalTimes, GetGlobalCPUStats());
+ mGlobalUpdateTime = globalTimes.updateTime;
+ mGlobalUsageTime = globalTimes.usageTime;
+
+ mInitialized = true;
+
+ CPUUsageWatcher* self = this;
+ NS_DispatchToMainThread(NS_NewRunnableFunction(
+ "CPUUsageWatcher::Init",
+ [=]() { BackgroundHangMonitor::RegisterAnnotator(*self); }));
+
+ return Ok();
+}
+
+void CPUUsageWatcher::Uninit() {
+ if (mInitialized) {
+ BackgroundHangMonitor::UnregisterAnnotator(*this);
+ }
+ mInitialized = false;
+}
+
+Result<Ok, CPUUsageWatcherError> CPUUsageWatcher::CollectCPUUsage() {
+ if (!mInitialized) {
+ return Ok();
+ }
+
+ mExternalUsageRatio = 0.0f;
+
+ CPUStats processTimes;
+ MOZ_TRY_VAR(processTimes, GetProcessCPUStats(mNumCPUs));
+ CPUStats globalTimes;
+ MOZ_TRY_VAR(globalTimes, GetGlobalCPUStats());
+
+ uint64_t processUsageDelta = processTimes.usageTime - mProcessUsageTime;
+ uint64_t processUpdateDelta = processTimes.updateTime - mProcessUpdateTime;
+ float processUsageNormalized =
+ processUsageDelta > 0
+ ? (float)processUsageDelta / (float)processUpdateDelta
+ : 0.0f;
+
+ uint64_t globalUsageDelta = globalTimes.usageTime - mGlobalUsageTime;
+ uint64_t globalUpdateDelta = globalTimes.updateTime - mGlobalUpdateTime;
+ float globalUsageNormalized =
+ globalUsageDelta > 0 ? (float)globalUsageDelta / (float)globalUpdateDelta
+ : 0.0f;
+
+ mProcessUsageTime = processTimes.usageTime;
+ mProcessUpdateTime = processTimes.updateTime;
+ mGlobalUsageTime = globalTimes.usageTime;
+ mGlobalUpdateTime = globalTimes.updateTime;
+
+ mExternalUsageRatio =
+ std::max(0.0f, globalUsageNormalized - processUsageNormalized);
+
+ return Ok();
+}
+
+void CPUUsageWatcher::AnnotateHang(BackgroundHangAnnotations& aAnnotations) {
+ if (!mInitialized) {
+ return;
+ }
+
+ if (mExternalUsageRatio > mExternalUsageThreshold) {
+ aAnnotations.AddAnnotation(u"ExternalCPUHigh"_ns, true);
+ }
+}
+
+#else // !CPU_USAGE_WATCHER_ACTIVE
+
+Result<Ok, CPUUsageWatcherError> CPUUsageWatcher::Init() { return Ok(); }
+
+void CPUUsageWatcher::Uninit() {}
+
+Result<Ok, CPUUsageWatcherError> CPUUsageWatcher::CollectCPUUsage() {
+ return Ok();
+}
+
+void CPUUsageWatcher::AnnotateHang(BackgroundHangAnnotations& aAnnotations) {}
+
+#endif // CPU_USAGE_WATCHER_ACTIVE
+
+} // namespace mozilla
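A compact restatement of the normalization CollectCPUUsage() performs, with hypothetical deltas to make the arithmetic concrete:

    #include <algorithm>
    #include <cstdint>

    // Both delta pairs must be in matching units, as in the code above.
    float ExternalUsage(uint64_t processUsageDelta, uint64_t processUpdateDelta,
                        uint64_t globalUsageDelta, uint64_t globalUpdateDelta) {
      float process = processUpdateDelta
                          ? (float)processUsageDelta / (float)processUpdateDelta
                          : 0.0f;
      float global = globalUpdateDelta
                         ? (float)globalUsageDelta / (float)globalUpdateDelta
                         : 0.0f;
      return std::max(0.0f, global - process);
    }

    // E.g. ExternalUsage(200, 1000, 700, 1000) == 0.5f: half a core's worth
    // of CPU time was consumed outside our process during the window.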
diff --git a/xpcom/threads/CPUUsageWatcher.h b/xpcom/threads/CPUUsageWatcher.h
new file mode 100644
index 0000000000..c3a643378a
--- /dev/null
+++ b/xpcom/threads/CPUUsageWatcher.h
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_CPUUsageWatcher_h
+#define mozilla_CPUUsageWatcher_h
+
+#include <stdint.h>
+
+#include "mozilla/HangAnnotations.h"
+#include "mozilla/Result.h"
+
+// We only support OSX and Windows, because on Linux we're forced to read
+// from /proc/stat in order to get global CPU values. We would prefer to not
+// eat that cost for this.
+#if defined(NIGHTLY_BUILD) && (defined(XP_WIN) || defined(XP_MACOSX))
+# define CPU_USAGE_WATCHER_ACTIVE
+#endif
+
+namespace mozilla {
+
+// Start error values at 1 to allow using the UnusedZero Result
+// optimization.
+enum CPUUsageWatcherError : uint8_t {
+ ClockGetTimeError = 1,
+ GetNumberOfProcessorsError,
+ GetProcessTimesError,
+ GetSystemTimesError,
+ HostStatisticsError,
+ ProcStatError,
+};
+
+namespace detail {
+
+template <>
+struct UnusedZero<CPUUsageWatcherError> : UnusedZeroEnum<CPUUsageWatcherError> {
+};
+
+} // namespace detail
+
+class CPUUsageHangAnnotator : public BackgroundHangAnnotator {
+ public:
+};
+
+class CPUUsageWatcher : public BackgroundHangAnnotator {
+ public:
+#ifdef CPU_USAGE_WATCHER_ACTIVE
+ CPUUsageWatcher()
+ : mInitialized(false),
+ mExternalUsageThreshold(0),
+ mExternalUsageRatio(0),
+ mProcessUsageTime(0),
+ mProcessUpdateTime(0),
+ mGlobalUsageTime(0),
+ mGlobalUpdateTime(0),
+ mNumCPUs(0) {}
+#endif
+
+ Result<Ok, CPUUsageWatcherError> Init();
+
+ void Uninit();
+
+ // Updates necessary values to allow AnnotateHang to function. This must be
+ // called on some semi-regular basis, as it will calculate the mean CPU
+ // usage values between now and the last time it was called.
+ Result<Ok, CPUUsageWatcherError> CollectCPUUsage();
+
+ void AnnotateHang(BackgroundHangAnnotations& aAnnotations) final;
+
+ private:
+#ifdef CPU_USAGE_WATCHER_ACTIVE
+ bool mInitialized;
+ // The threshold above which we will mark a hang as occurring under high
+ // external CPU usage conditions
+ float mExternalUsageThreshold;
+ // The CPU usage (0-1) external to our process, averaged between the two
+ // most recent monitor thread runs
+ float mExternalUsageRatio;
+ // The total cumulative CPU usage time by our process as of the last
+ // CollectCPUUsage or Startup
+ uint64_t mProcessUsageTime;
+ // A time value in the same units as mProcessUsageTime used to
+ // determine the ratio of CPU usage time to idle time
+ uint64_t mProcessUpdateTime;
+ // The total cumulative CPU usage time by all processes as of the last
+ // CollectCPUUsage or Startup
+ uint64_t mGlobalUsageTime;
+ // A time value in the same units as mGlobalUsageTime used to
+ // determine the ratio of CPU usage time to idle time
+ uint64_t mGlobalUpdateTime;
+ // The number of virtual cores on our machine
+ uint64_t mNumCPUs;
+#endif
+};
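+
+// Illustrative usage sketch (an assumption, not part of this patch): a
+// monitor thread would drive the watcher roughly like this, registering it
+// so its annotation is consulted when a hang is reported:
+//
+//   CPUUsageWatcher watcher;
+//   if (watcher.Init().isErr()) {
+//     return;  // e.g. unsupported platform or clock failure
+//   }
+//   BackgroundHangMonitor::RegisterAnnotator(watcher);
+//   // ...then, on a semi-regular cadence:
+//   Unused << watcher.CollectCPUUsage();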
+
+} // namespace mozilla
+
+#endif // mozilla_CPUUsageWatcher_h
diff --git a/xpcom/threads/CondVar.h b/xpcom/threads/CondVar.h
new file mode 100644
index 0000000000..e427fc2d9e
--- /dev/null
+++ b/xpcom/threads/CondVar.h
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_CondVar_h
+#define mozilla_CondVar_h
+
+#include "mozilla/BlockingResourceBase.h"
+#include "mozilla/PlatformConditionVariable.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/TimeStamp.h"
+
+#if defined(MOZILLA_INTERNAL_API) && !defined(DEBUG)
+# include "mozilla/ProfilerThreadSleep.h"
+#endif  // defined(MOZILLA_INTERNAL_API) && !defined(DEBUG)
+
+namespace mozilla {
+
+/**
+ * Similarly to OffTheBooksMutex, OffTheBooksCondvar is identical to CondVar,
+ * except that OffTheBooksCondVar doesn't include leak checking. Sometimes
+ * you want to intentionally "leak" a CondVar until shutdown; in these cases,
+ * OffTheBooksCondVar is for you.
+ */
+class OffTheBooksCondVar : BlockingResourceBase {
+ public:
+ /**
+ * OffTheBooksCondVar
+ *
+ * The CALLER owns |aLock|.
+ *
+ * @param aLock A Mutex to associate with this condition variable.
+   * @param aName A name which can reference this condition variable
+ **/
+ OffTheBooksCondVar(OffTheBooksMutex& aLock, const char* aName)
+ : BlockingResourceBase(aName, eCondVar), mLock(&aLock) {}
+
+ /**
+ * ~OffTheBooksCondVar
+ * Clean up after this OffTheBooksCondVar, but NOT its associated Mutex.
+ **/
+ ~OffTheBooksCondVar() = default;
+
+ /**
+ * Wait
+ * @see prcvar.h
+ **/
+#ifndef DEBUG
+ void Wait() {
+# ifdef MOZILLA_INTERNAL_API
+ AUTO_PROFILER_THREAD_SLEEP;
+# endif // MOZILLA_INTERNAL_API
+ mImpl.wait(*mLock);
+ }
+
+ CVStatus Wait(TimeDuration aDuration) {
+# ifdef MOZILLA_INTERNAL_API
+ AUTO_PROFILER_THREAD_SLEEP;
+# endif // MOZILLA_INTERNAL_API
+ return mImpl.wait_for(*mLock, aDuration);
+ }
+#else
+ // NOTE: debug impl is in BlockingResourceBase.cpp
+ void Wait();
+ CVStatus Wait(TimeDuration aDuration);
+#endif
+
+ /**
+ * Notify
+ * @see prcvar.h
+ **/
+ void Notify() { mImpl.notify_one(); }
+
+ /**
+ * NotifyAll
+ * @see prcvar.h
+ **/
+ void NotifyAll() { mImpl.notify_all(); }
+
+#ifdef DEBUG
+ /**
+ * AssertCurrentThreadOwnsMutex
+ * @see Mutex::AssertCurrentThreadOwns
+ **/
+ void AssertCurrentThreadOwnsMutex() const MOZ_ASSERT_CAPABILITY(mLock) {
+ mLock->AssertCurrentThreadOwns();
+ }
+
+ /**
+ * AssertNotCurrentThreadOwnsMutex
+ * @see Mutex::AssertNotCurrentThreadOwns
+ **/
+ void AssertNotCurrentThreadOwnsMutex() const MOZ_ASSERT_CAPABILITY(!mLock) {
+ mLock->AssertNotCurrentThreadOwns();
+ }
+
+#else
+ void AssertCurrentThreadOwnsMutex() const MOZ_ASSERT_CAPABILITY(mLock) {}
+ void AssertNotCurrentThreadOwnsMutex() const MOZ_ASSERT_CAPABILITY(!mLock) {}
+
+#endif // ifdef DEBUG
+
+ private:
+ OffTheBooksCondVar();
+ OffTheBooksCondVar(const OffTheBooksCondVar&) = delete;
+ OffTheBooksCondVar& operator=(const OffTheBooksCondVar&) = delete;
+
+ OffTheBooksMutex* mLock;
+ detail::ConditionVariableImpl mImpl;
+};
+
+/**
+ * CondVar
+ * Vanilla condition variable. Please don't use this unless you have a
+ * compelling reason --- Monitor provides a simpler API.
+ */
+class CondVar : public OffTheBooksCondVar {
+ public:
+ CondVar(OffTheBooksMutex& aLock, const char* aName)
+ : OffTheBooksCondVar(aLock, aName) {
+ MOZ_COUNT_CTOR(CondVar);
+ }
+
+ MOZ_COUNTED_DTOR(CondVar)
+
+ private:
+  CondVar() = delete;
+  CondVar(const CondVar&) = delete;
+  CondVar& operator=(const CondVar&) = delete;
+};
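+
+// Illustrative wait/notify sketch (an assumption, not from this file):
+// condition variables should be waited on in a predicate loop, re-checking
+// shared state after every wakeup, since waits can return spuriously:
+//
+//   Mutex lock("WorkQueue");
+//   CondVar cv(lock, "WorkQueueCV");
+//   bool ready = false;
+//
+//   // Consumer, with |lock| held:
+//   while (!ready) {
+//     cv.Wait();
+//   }
+//
+//   // Producer, with |lock| held:
+//   ready = true;
+//   cv.Notify();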
+
+} // namespace mozilla
+
+#endif // ifndef mozilla_CondVar_h
diff --git a/xpcom/threads/DataMutex.h b/xpcom/threads/DataMutex.h
new file mode 100644
index 0000000000..44f0a35762
--- /dev/null
+++ b/xpcom/threads/DataMutex.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DataMutex_h__
+#define DataMutex_h__
+
+#include <utility>
+#include "mozilla/Mutex.h"
+#include "mozilla/StaticMutex.h"
+
+namespace mozilla {
+
+// A template to wrap a type with a mutex so that accesses to the type's
+// data are required to take the lock before accessing it. This ensures
+// that a mutex is explicitly associated with the data that it protects,
+// and makes it impossible to access the data without first taking the
+// associated mutex.
+//
+// This is based on Rust's std::sync::Mutex, which operates under the
+// strategy of locking data, rather than code.
+//
+// Examples:
+//
+// DataMutex<uint32_t> u32DataMutex(1, "u32DataMutex");
+// auto x = u32DataMutex.Lock();
+// *x = 4;
+//   assert(*x == 4u);
+//
+// DataMutex<nsTArray<uint32_t>> arrayDataMutex("arrayDataMutex");
+// auto a = arrayDataMutex.Lock();
+// auto& x = a.ref();
+// x.AppendElement(1u);
+//   assert(x[0] == 1u);
+//
+template <typename T, typename MutexType>
+class DataMutexBase {
+ public:
+ template <typename V>
+ class MOZ_STACK_CLASS AutoLockBase {
+ public:
+ V* operator->() const& { return &ref(); }
+ V* operator->() const&& = delete;
+
+ V& operator*() const& { return ref(); }
+ V& operator*() const&& = delete;
+
+ // Like RefPtr, make this act like its underlying raw pointer type
+ // whenever it is used in a context where a raw pointer is expected.
+ operator V*() const& { return &ref(); }
+
+ // Like RefPtr, don't allow implicit conversion of temporary to raw pointer.
+ operator V*() const&& = delete;
+
+ V& ref() const& {
+ MOZ_ASSERT(mOwner);
+ return mOwner->mValue;
+ }
+ V& ref() const&& = delete;
+
+ AutoLockBase(AutoLockBase&& aOther) : mOwner(aOther.mOwner) {
+ aOther.mOwner = nullptr;
+ }
+
+ ~AutoLockBase() {
+ if (mOwner) {
+ mOwner->mMutex.Unlock();
+ mOwner = nullptr;
+ }
+ }
+
+ private:
+ friend class DataMutexBase;
+
+ AutoLockBase(const AutoLockBase& aOther) = delete;
+
+ explicit AutoLockBase(DataMutexBase<T, MutexType>* aDataMutex)
+ : mOwner(aDataMutex) {
+ MOZ_ASSERT(!!mOwner);
+ mOwner->mMutex.Lock();
+ }
+
+ DataMutexBase<T, MutexType>* mOwner;
+ };
+
+ using AutoLock = AutoLockBase<T>;
+ using ConstAutoLock = AutoLockBase<const T>;
+
+ explicit DataMutexBase(const char* aName) : mMutex(aName) {}
+
+ DataMutexBase(T&& aValue, const char* aName)
+ : mMutex(aName), mValue(std::move(aValue)) {}
+
+ AutoLock Lock() { return AutoLock(this); }
+ ConstAutoLock ConstLock() { return ConstAutoLock(this); }
+
+ const MutexType& Mutex() const { return mMutex; }
+
+ private:
+ MutexType mMutex;
+ T mValue;
+};
+
+// Craft a version of StaticMutex that takes a const char* in its ctor.
+// We need this so it works interchangeably with Mutex which requires a const
+// char* aName in its ctor.
+class StaticMutexNameless : public StaticMutex {
+ public:
+ explicit StaticMutexNameless(const char* aName) : StaticMutex() {}
+
+ private:
+ // Disallow copy construction, `=`, `new`, and `delete` like BaseStaticMutex.
+#ifdef DEBUG
+ StaticMutexNameless(StaticMutexNameless& aOther);
+#endif // DEBUG
+ StaticMutexNameless& operator=(StaticMutexNameless* aRhs);
+ static void* operator new(size_t) noexcept(true);
+ static void operator delete(void*);
+};
+
+template <typename T>
+using DataMutex = DataMutexBase<T, Mutex>;
+template <typename T>
+using StaticDataMutex = DataMutexBase<T, StaticMutexNameless>;
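+
+// Illustrative sketch (an assumption, not part of this patch): StaticDataMutex
+// is meant for data with static storage duration; StaticMutexNameless accepts
+// the name purely for signature parity with Mutex and ignores it.
+//
+//   static StaticDataMutex<nsTArray<uint32_t>> sPendingIds("sPendingIds");
+//
+//   void RememberId(uint32_t aId) {
+//     auto ids = sPendingIds.Lock();
+//     ids->AppendElement(aId);
+//   }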
+
+} // namespace mozilla
+
+#endif // DataMutex_h__
diff --git a/xpcom/threads/DeadlockDetector.h b/xpcom/threads/DeadlockDetector.h
new file mode 100644
index 0000000000..5c40941328
--- /dev/null
+++ b/xpcom/threads/DeadlockDetector.h
@@ -0,0 +1,359 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef mozilla_DeadlockDetector_h
+#define mozilla_DeadlockDetector_h
+
+#include "mozilla/Attributes.h"
+
+#include <stdlib.h>
+
+#include "prlock.h"
+
+#include "nsClassHashtable.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+
+/**
+ * DeadlockDetector
+ *
+ * The following is an approximate description of how the deadlock detector
+ * works.
+ *
+ * The deadlock detector ensures that all blocking resources are
+ * acquired according to a partial order P. One type of blocking
+ * resource is a lock. If a lock l1 is acquired (locked) before l2,
+ * then we say that |l1 <_P l2|. The detector flags an error if two
+ * locks l1 and l2 have an inconsistent ordering in P; that is, if
+ * both |l1 <_P l2| and |l2 <_P l1|. This is a potential error
+ * because a thread acquiring l1,l2 according to the first order might
+ * race with a thread acquiring them according to the second order.
+ * If this happens under the right conditions, then the acquisitions
+ * will deadlock.
+ *
+ * This deadlock detector doesn't know at compile-time what P is. So,
+ * it tries to discover the order at run time. More precisely, it
+ * finds <i>some</i> order P, then tries to find chains of resource
+ * acquisitions that violate P. An example acquisition sequence, and
+ * the orders they impose, is
+ * l1.lock() // current chain: [ l1 ]
+ * // order: { }
+ *
+ * l2.lock() // current chain: [ l1, l2 ]
+ * // order: { l1 <_P l2 }
+ *
+ * l3.lock() // current chain: [ l1, l2, l3 ]
+ * // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
+ * // (note: <_P is transitive, so also |l1 <_P l3|)
+ *
+ * l2.unlock() // current chain: [ l1, l3 ]
+ * // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
+ * // (note: it's OK, but weird, that l2 was unlocked out
+ * // of order. we still have l1 <_P l3).
+ *
+ * l2.lock() // current chain: [ l1, l3, l2 ]
+ * // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3,
+ * l3 <_P l2 (!!!) }
+ * BEEP BEEP! Here the detector will flag a potential error, since
+ * l2 and l3 were used inconsistently (and potentially in ways that
+ * would deadlock).
+ */
+template <typename T>
+class DeadlockDetector {
+ public:
+ typedef nsTArray<const T*> ResourceAcquisitionArray;
+
+ private:
+ struct OrderingEntry;
+ typedef nsTArray<OrderingEntry*> HashEntryArray;
+ typedef typename HashEntryArray::index_type index_type;
+ typedef typename HashEntryArray::size_type size_type;
+ static const index_type NoIndex = HashEntryArray::NoIndex;
+
+ /**
+ * Value type for the ordering table. Contains the other
+ * resources on which an ordering constraint |key < other|
+ * exists. The catch is that we also store the calling context at
+ * which the other resource was acquired; this improves the
+ * quality of error messages when potential deadlock is detected.
+ */
+ struct OrderingEntry {
+ explicit OrderingEntry(const T* aResource)
+        : mOrderedLT(),  // FIXME bug 456272: set to empirical dep size?
+ mExternalRefs(),
+ mResource(aResource) {}
+ ~OrderingEntry() {}
+
+ size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ size_t n = aMallocSizeOf(this);
+ n += mOrderedLT.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ n += mExternalRefs.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ return n;
+ }
+
+ HashEntryArray mOrderedLT; // this <_o Other
+ HashEntryArray mExternalRefs; // hash entries that reference this
+ const T* mResource;
+ };
+
+ // Throwaway RAII lock to make the following code safer.
+ struct PRAutoLock {
+ explicit PRAutoLock(PRLock* aLock) : mLock(aLock) { PR_Lock(mLock); }
+ ~PRAutoLock() { PR_Unlock(mLock); }
+ PRLock* mLock;
+ };
+
+ public:
+ static const uint32_t kDefaultNumBuckets;
+
+ /**
+ * DeadlockDetector
+ * Create a new deadlock detector.
+ *
+ * @param aNumResourcesGuess Guess at approximate number of resources
+ * that will be checked.
+ */
+ explicit DeadlockDetector(uint32_t aNumResourcesGuess = kDefaultNumBuckets)
+ : mOrdering(aNumResourcesGuess) {
+ mLock = PR_NewLock();
+ if (!mLock) {
+ MOZ_CRASH("couldn't allocate deadlock detector lock");
+ }
+ }
+
+ /**
+ * ~DeadlockDetector
+ *
+ * *NOT* thread safe.
+ */
+ ~DeadlockDetector() { PR_DestroyLock(mLock); }
+
+ size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ size_t n = aMallocSizeOf(this);
+
+ {
+ PRAutoLock _(mLock);
+ n += mOrdering.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ for (const auto& data : mOrdering.Values()) {
+ // NB: Key is accounted for in the entry.
+ n += data->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ }
+
+ return n;
+ }
+
+ /**
+ * Add
+ * Make the deadlock detector aware of |aResource|.
+ *
+ * WARNING: The deadlock detector owns |aResource|.
+ *
+ * Thread safe.
+ *
+ * @param aResource Resource to make deadlock detector aware of.
+ */
+ void Add(const T* aResource) {
+ PRAutoLock _(mLock);
+ mOrdering.InsertOrUpdate(aResource, MakeUnique<OrderingEntry>(aResource));
+ }
+
+ void Remove(const T* aResource) {
+ PRAutoLock _(mLock);
+
+ OrderingEntry* entry = mOrdering.Get(aResource);
+
+ // Iterate the external refs and remove the entry from them.
+ HashEntryArray& refs = entry->mExternalRefs;
+ for (index_type i = 0; i < refs.Length(); i++) {
+ refs[i]->mOrderedLT.RemoveElementSorted(entry);
+ }
+
+ // Iterate orders and remove this entry from their refs.
+ HashEntryArray& orders = entry->mOrderedLT;
+ for (index_type i = 0; i < orders.Length(); i++) {
+ orders[i]->mExternalRefs.RemoveElementSorted(entry);
+ }
+
+ // Now the entry can be safely removed.
+ mOrdering.Remove(aResource);
+ }
+
+ /**
+ * CheckAcquisition This method is called after acquiring |aLast|,
+ * but before trying to acquire |aProposed|.
+ * It determines whether actually trying to acquire |aProposed|
+ * will create problems. It is OK if |aLast| is nullptr; this is
+ * interpreted as |aProposed| being the thread's first acquisition
+ * of its current chain.
+ *
+ * Iff acquiring |aProposed| may lead to deadlock for some thread
+ * interleaving (including the current one!), the cyclical
+   * dependency from which this was deduced is returned. Otherwise,
+   * nullptr is returned.
+ *
+ * If a potential deadlock is detected and a resource cycle is
+ * returned, it is the *caller's* responsibility to free it.
+ *
+ * Thread safe.
+ *
+   * @param aLast Last resource acquired by calling thread (or nullptr).
+ * @param aProposed Resource calling thread proposes to acquire.
+ */
+ ResourceAcquisitionArray* CheckAcquisition(const T* aLast,
+ const T* aProposed) {
+ if (!aLast) {
+      // don't check if |nullptr < aProposed|; just vamoose
+      return nullptr;
+ }
+
+ NS_ASSERTION(aProposed, "null resource");
+ PRAutoLock _(mLock);
+
+ OrderingEntry* proposed = mOrdering.Get(aProposed);
+ NS_ASSERTION(proposed, "missing ordering entry");
+
+ OrderingEntry* current = mOrdering.Get(aLast);
+ NS_ASSERTION(current, "missing ordering entry");
+
+ // this is the crux of the deadlock detector algorithm
+
+ if (current == proposed) {
+ // reflexive deadlock. fastpath b/c InTransitiveClosure is
+ // not applicable here.
+ ResourceAcquisitionArray* cycle = new ResourceAcquisitionArray();
+ if (!cycle) {
+ MOZ_CRASH("can't allocate dep. cycle array");
+ }
+ cycle->AppendElement(current->mResource);
+ cycle->AppendElement(aProposed);
+ return cycle;
+ }
+ if (InTransitiveClosure(current, proposed)) {
+ // we've already established |aLast < aProposed|. all is well.
+      return nullptr;
+ }
+ if (InTransitiveClosure(proposed, current)) {
+ // the order |aProposed < aLast| has been deduced, perhaps
+ // transitively. we're attempting to violate that
+ // constraint by acquiring resources in the order
+ // |aLast < aProposed|, and thus we may deadlock under the
+ // right conditions.
+ ResourceAcquisitionArray* cycle = GetDeductionChain(proposed, current);
+ // show how acquiring |aProposed| would complete the cycle
+ cycle->AppendElement(aProposed);
+ return cycle;
+ }
+ // |aLast|, |aProposed| are unordered according to our
+ // poset. this is fine, but we now need to add this
+ // ordering constraint.
+ current->mOrderedLT.InsertElementSorted(proposed);
+ proposed->mExternalRefs.InsertElementSorted(current);
+    return nullptr;
+ }
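+
+  // Illustrative caller sketch (an assumption; in this tree the caller is
+  // BlockingResourceBase). The returned cycle, when non-null, is owned by
+  // the caller and must be freed after reporting:
+  //
+  //   if (ResourceAcquisitionArray* cycle =
+  //           sDeadlockDetector->CheckAcquisition(last, proposed)) {
+  //     ReportPotentialDeadlock(*cycle);  // hypothetical reporting helper
+  //     delete cycle;
+  //   }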
+
+ /**
+ * Return true iff |aTarget| is in the transitive closure of |aStart|
+ * over the ordering relation `<_this'.
+ *
+ * @precondition |aStart != aTarget|
+ */
+ bool InTransitiveClosure(const OrderingEntry* aStart,
+ const OrderingEntry* aTarget) const {
+ // NB: Using a static comparator rather than default constructing one shows
+ // a 9% improvement in scalability tests on some systems.
+ static nsDefaultComparator<const OrderingEntry*, const OrderingEntry*> comp;
+ if (aStart->mOrderedLT.BinaryIndexOf(aTarget, comp) != NoIndex) {
+ return true;
+ }
+
+ index_type i = 0;
+ size_type len = aStart->mOrderedLT.Length();
+ for (auto it = aStart->mOrderedLT.Elements(); i < len; ++i, ++it) {
+ if (InTransitiveClosure(*it, aTarget)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Return an array of all resource acquisitions
+ * aStart <_this r1 <_this r2 <_ ... <_ aTarget
+ * from which |aStart <_this aTarget| was deduced, including
+ * |aStart| and |aTarget|.
+ *
+ * Nb: there may be multiple deductions of |aStart <_this
+ * aTarget|. This function returns the first ordering found by
+ * depth-first search.
+ *
+ * Nb: |InTransitiveClosure| could be replaced by this function.
+ * However, this one is more expensive because we record the DFS
+ * search stack on the heap whereas the other doesn't.
+ *
+ * @precondition |aStart != aTarget|
+ */
+ ResourceAcquisitionArray* GetDeductionChain(const OrderingEntry* aStart,
+ const OrderingEntry* aTarget) {
+ ResourceAcquisitionArray* chain = new ResourceAcquisitionArray();
+ if (!chain) {
+ MOZ_CRASH("can't allocate dep. cycle array");
+ }
+ chain->AppendElement(aStart->mResource);
+
+ NS_ASSERTION(GetDeductionChain_Helper(aStart, aTarget, chain),
+ "GetDeductionChain called when there's no deadlock");
+ return chain;
+ }
+
+ // precondition: |aStart != aTarget|
+ // invariant: |aStart| is the last element in |aChain|
+ bool GetDeductionChain_Helper(const OrderingEntry* aStart,
+ const OrderingEntry* aTarget,
+ ResourceAcquisitionArray* aChain) {
+ if (aStart->mOrderedLT.BinaryIndexOf(aTarget) != NoIndex) {
+ aChain->AppendElement(aTarget->mResource);
+ return true;
+ }
+
+ index_type i = 0;
+ size_type len = aStart->mOrderedLT.Length();
+ for (auto it = aStart->mOrderedLT.Elements(); i < len; ++i, ++it) {
+ aChain->AppendElement((*it)->mResource);
+ if (GetDeductionChain_Helper(*it, aTarget, aChain)) {
+ return true;
+ }
+ aChain->RemoveLastElement();
+ }
+ return false;
+ }
+
+ /**
+ * The partial order on resource acquisitions used by the deadlock
+ * detector.
+ */
+ nsClassHashtable<nsPtrHashKey<const T>, OrderingEntry> mOrdering;
+
+ /**
+ * Protects contentious methods.
+ * Nb: can't use mozilla::Mutex since we are used as its deadlock
+ * detector.
+ */
+ PRLock* mLock;
+
+ private:
+ DeadlockDetector(const DeadlockDetector& aDD) = delete;
+ DeadlockDetector& operator=(const DeadlockDetector& aDD) = delete;
+};
+
+template <typename T>
+// FIXME bug 456272: tune based on average workload
+const uint32_t DeadlockDetector<T>::kDefaultNumBuckets = 32;
+
+} // namespace mozilla
+
+#endif // ifndef mozilla_DeadlockDetector_h
diff --git a/xpcom/threads/DelayedRunnable.cpp b/xpcom/threads/DelayedRunnable.cpp
new file mode 100644
index 0000000000..a9231442a7
--- /dev/null
+++ b/xpcom/threads/DelayedRunnable.cpp
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DelayedRunnable.h"
+
+#include "mozilla/ProfilerRunnable.h"
+
+namespace mozilla {
+
+DelayedRunnable::DelayedRunnable(already_AddRefed<nsISerialEventTarget> aTarget,
+ already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t aDelay)
+ : mozilla::Runnable("DelayedRunnable"),
+ mTarget(aTarget),
+ mDelayedFrom(TimeStamp::NowLoRes()),
+ mDelay(aDelay),
+ mWrappedRunnable(aRunnable) {}
+
+nsresult DelayedRunnable::Init() {
+ MutexAutoLock lock(mMutex);
+ if (!mWrappedRunnable) {
+ MOZ_ASSERT_UNREACHABLE();
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ nsresult rv = mTarget->RegisterShutdownTask(this);
+ if (NS_FAILED(rv)) {
+ MOZ_DIAGNOSTIC_ASSERT(
+ rv == NS_ERROR_UNEXPECTED,
+ "DelayedRunnable target must support RegisterShutdownTask");
+ NS_WARNING("DelayedRunnable init after target is shutdown");
+ return rv;
+ }
+
+ rv = NS_NewTimerWithCallback(getter_AddRefs(mTimer), this, mDelay,
+ nsITimer::TYPE_ONE_SHOT, mTarget);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ mTarget->UnregisterShutdownTask(this);
+ }
+ return rv;
+}
+
+NS_IMETHODIMP DelayedRunnable::Run() {
+ MOZ_ASSERT(mTarget->IsOnCurrentThread());
+
+ nsCOMPtr<nsIRunnable> runnable;
+ {
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mTimer, "Init() must have been called");
+
+ // Already ran?
+ if (!mWrappedRunnable) {
+ return NS_OK;
+ }
+
+ // Are we too early?
+ if ((mozilla::TimeStamp::NowLoRes() - mDelayedFrom).ToMilliseconds() <
+ mDelay) {
+ return NS_OK; // Let the nsITimer run us.
+ }
+
+ mTimer->Cancel();
+ mTarget->UnregisterShutdownTask(this);
+ runnable = mWrappedRunnable.forget();
+ }
+
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(runnable);
+ return runnable->Run();
+}
+
+NS_IMETHODIMP DelayedRunnable::Notify(nsITimer* aTimer) {
+ MOZ_ASSERT(mTarget->IsOnCurrentThread());
+
+ nsCOMPtr<nsIRunnable> runnable;
+ {
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(mTimer, "Init() must have been called");
+
+ // We may have already run due to races
+ if (!mWrappedRunnable) {
+ return NS_OK;
+ }
+
+ mTarget->UnregisterShutdownTask(this);
+ runnable = mWrappedRunnable.forget();
+ }
+
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(runnable);
+ return runnable->Run();
+}
+
+void DelayedRunnable::TargetShutdown() {
+ MOZ_ASSERT(mTarget->IsOnCurrentThread());
+
+ // Called at shutdown
+ MutexAutoLock lock(mMutex);
+ if (!mWrappedRunnable) {
+ return;
+ }
+ mWrappedRunnable = nullptr;
+
+ if (mTimer) {
+ mTimer->Cancel();
+ }
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(DelayedRunnable, Runnable, nsITimerCallback,
+ nsITargetShutdownTask)
+
+} // namespace mozilla
diff --git a/xpcom/threads/DelayedRunnable.h b/xpcom/threads/DelayedRunnable.h
new file mode 100644
index 0000000000..242278fa5b
--- /dev/null
+++ b/xpcom/threads/DelayedRunnable.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef XPCOM_THREADS_DELAYEDRUNNABLE_H_
+#define XPCOM_THREADS_DELAYEDRUNNABLE_H_
+
+#include "mozilla/Mutex.h"
+#include "mozilla/TimeStamp.h"
+#include "nsCOMPtr.h"
+#include "nsIRunnable.h"
+#include "nsITargetShutdownTask.h"
+#include "nsITimer.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+class DelayedRunnable : public Runnable,
+ public nsITimerCallback,
+ public nsITargetShutdownTask {
+ public:
+ DelayedRunnable(already_AddRefed<nsISerialEventTarget> aTarget,
+ already_AddRefed<nsIRunnable> aRunnable, uint32_t aDelay);
+
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIRUNNABLE
+ NS_DECL_NSITIMERCALLBACK
+
+ nsresult Init();
+
+ /**
+ * Called when the target is going away so the runnable can be released safely
+ * on the target thread.
+ */
+ void TargetShutdown() override;
+
+ private:
+ ~DelayedRunnable() = default;
+
+ const nsCOMPtr<nsISerialEventTarget> mTarget;
+ const TimeStamp mDelayedFrom;
+ const uint32_t mDelay;
+
+ mozilla::Mutex mMutex{"DelayedRunnable"};
+ nsCOMPtr<nsIRunnable> mWrappedRunnable;
+ nsCOMPtr<nsITimer> mTimer;
+};
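+
+// Illustrative usage sketch (an assumption, not part of this patch): callers
+// normally reach this through helpers such as
+// NS_DelayedDispatchToCurrentThread rather than constructing one directly:
+//
+//   RefPtr<DelayedRunnable> delayed = new DelayedRunnable(
+//       do_AddRef(target), runnable.forget(), /* aDelay = */ 100);
+//   nsresult rv = delayed->Init();  // arms the timer; fails after shutdown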
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/EventQueue.cpp b/xpcom/threads/EventQueue.cpp
new file mode 100644
index 0000000000..0decc3ce4c
--- /dev/null
+++ b/xpcom/threads/EventQueue.cpp
@@ -0,0 +1,131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/EventQueue.h"
+
+#include "GeckoProfiler.h"
+#include "InputTaskManager.h"
+#include "VsyncTaskManager.h"
+#include "nsIRunnable.h"
+#include "TaskController.h"
+
+using namespace mozilla;
+using namespace mozilla::detail;
+
+template <size_t ItemsPerPage>
+void EventQueueInternal<ItemsPerPage>::PutEvent(
+ already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aPriority,
+ const MutexAutoLock& aProofOfLock, mozilla::TimeDuration* aDelay) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+
+ static_assert(static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_IDLE) ==
+ static_cast<uint32_t>(EventQueuePriority::Idle));
+ static_assert(static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_NORMAL) ==
+ static_cast<uint32_t>(EventQueuePriority::Normal));
+ static_assert(
+ static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_MEDIUMHIGH) ==
+ static_cast<uint32_t>(EventQueuePriority::MediumHigh));
+ static_assert(
+ static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_INPUT_HIGH) ==
+ static_cast<uint32_t>(EventQueuePriority::InputHigh));
+ static_assert(static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_VSYNC) ==
+ static_cast<uint32_t>(EventQueuePriority::Vsync));
+ static_assert(
+ static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_RENDER_BLOCKING) ==
+ static_cast<uint32_t>(EventQueuePriority::RenderBlocking));
+ static_assert(static_cast<uint32_t>(nsIRunnablePriority::PRIORITY_CONTROL) ==
+ static_cast<uint32_t>(EventQueuePriority::Control));
+
+ if (mForwardToTC) {
+ TaskController* tc = TaskController::Get();
+
+ TaskManager* manager = nullptr;
+ if (aPriority == EventQueuePriority::InputHigh) {
+ manager = InputTaskManager::Get();
+ } else if (aPriority == EventQueuePriority::DeferredTimers ||
+ aPriority == EventQueuePriority::Idle) {
+ manager = TaskController::Get()->GetIdleTaskManager();
+ } else if (aPriority == EventQueuePriority::Vsync) {
+ manager = VsyncTaskManager::Get();
+ }
+
+ tc->DispatchRunnable(event.forget(), static_cast<uint32_t>(aPriority),
+ manager);
+ return;
+ }
+
+ if (profiler_thread_is_being_profiled(ThreadProfilingFeatures::Sampling)) {
+ // check to see if the profiler has been enabled since the last PutEvent
+ while (mDispatchTimes.Count() < mQueue.Count()) {
+ mDispatchTimes.Push(TimeStamp());
+ }
+ mDispatchTimes.Push(aDelay ? TimeStamp::Now() - *aDelay : TimeStamp::Now());
+ }
+
+ mQueue.Push(std::move(event));
+}
+
+template <size_t ItemsPerPage>
+already_AddRefed<nsIRunnable> EventQueueInternal<ItemsPerPage>::GetEvent(
+ const MutexAutoLock& aProofOfLock, mozilla::TimeDuration* aLastEventDelay) {
+ if (mQueue.IsEmpty()) {
+ if (aLastEventDelay) {
+ *aLastEventDelay = TimeDuration();
+ }
+ return nullptr;
+ }
+
+ // We always want to clear the dispatch times, even if the profiler is turned
+ // off, because we want to empty the (previously-collected) dispatch times, if
+ // any, from when the profiler was turned on. We only want to do something
+ // interesting with the dispatch times if the profiler is turned on, though.
+ if (!mDispatchTimes.IsEmpty()) {
+ TimeStamp dispatch_time = mDispatchTimes.Pop();
+ if (profiler_is_active()) {
+ if (!dispatch_time.IsNull()) {
+ if (aLastEventDelay) {
+ *aLastEventDelay = TimeStamp::Now() - dispatch_time;
+ }
+ }
+ }
+ } else if (profiler_is_active()) {
+ if (aLastEventDelay) {
+ // if we just turned on the profiler, we don't have dispatch
+ // times for events already in the queue.
+ *aLastEventDelay = TimeDuration();
+ }
+ }
+
+ nsCOMPtr<nsIRunnable> result = mQueue.Pop();
+ return result.forget();
+}
+
+template <size_t ItemsPerPage>
+bool EventQueueInternal<ItemsPerPage>::IsEmpty(
+ const MutexAutoLock& aProofOfLock) {
+ return mQueue.IsEmpty();
+}
+
+template <size_t ItemsPerPage>
+bool EventQueueInternal<ItemsPerPage>::HasReadyEvent(
+ const MutexAutoLock& aProofOfLock) {
+ return !IsEmpty(aProofOfLock);
+}
+
+template <size_t ItemsPerPage>
+size_t EventQueueInternal<ItemsPerPage>::Count(
+ const MutexAutoLock& aProofOfLock) const {
+ return mQueue.Count();
+}
+
+namespace mozilla {
+template class EventQueueSized<16>; // Used by ThreadEventQueue
+template class EventQueueSized<64>; // Used by ThrottledEventQueue
+namespace detail {
+template class EventQueueInternal<16>; // Used by ThreadEventQueue
+template class EventQueueInternal<64>; // Used by ThrottledEventQueue
+} // namespace detail
+} // namespace mozilla
diff --git a/xpcom/threads/EventQueue.h b/xpcom/threads/EventQueue.h
new file mode 100644
index 0000000000..a604d03367
--- /dev/null
+++ b/xpcom/threads/EventQueue.h
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_EventQueue_h
+#define mozilla_EventQueue_h
+
+#include "mozilla/Mutex.h"
+#include "mozilla/Queue.h"
+#include "mozilla/TimeStamp.h"
+#include "nsCOMPtr.h"
+
+class nsIRunnable;
+
+namespace mozilla {
+
+#define EVENT_QUEUE_PRIORITY_LIST(EVENT_PRIORITY) \
+ EVENT_PRIORITY(Idle, 0) \
+ EVENT_PRIORITY(DeferredTimers, 1) \
+ EVENT_PRIORITY(Low, 2) \
+ EVENT_PRIORITY(InputLow, 3) \
+ EVENT_PRIORITY(Normal, 4) \
+ EVENT_PRIORITY(MediumHigh, 5) \
+ EVENT_PRIORITY(InputHigh, 6) \
+ EVENT_PRIORITY(Vsync, 7) \
+ EVENT_PRIORITY(InputHighest, 8) \
+ EVENT_PRIORITY(RenderBlocking, 9) \
+ EVENT_PRIORITY(Control, 10)
+
+enum class EventQueuePriority {
+#define EVENT_PRIORITY(NAME, VALUE) NAME = VALUE,
+ EVENT_QUEUE_PRIORITY_LIST(EVENT_PRIORITY)
+#undef EVENT_PRIORITY
+ Invalid
+};
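+
+// For reference, the X-macro above expands to Idle = 0, DeferredTimers = 1,
+// ..., Control = 10, with Invalid following as 11. Keeping explicit values
+// makes the numeric mapping against nsIRunnablePriority (checked by the
+// static_asserts in EventQueue.cpp) easy to audit.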
+
+class IdlePeriodState;
+
+namespace detail {
+
+// EventQueue is our unsynchronized event queue implementation. It is a queue
+// of runnables used on non-main threads, and it can optionally forward
+// runnables to TaskController.
+//
+// Since EventQueue is unsynchronized, it should be wrapped in an outer
+// SynchronizedEventQueue implementation (like ThreadEventQueue).
+template <size_t ItemsPerPage>
+class EventQueueInternal {
+ public:
+ explicit EventQueueInternal(bool aForwardToTC) : mForwardToTC(aForwardToTC) {}
+
+ // Add an event to the end of the queue. Implementors are free to use
+ // aPriority however they wish. If the runnable supports
+ // nsIRunnablePriority and the implementing class supports
+ // prioritization, aPriority represents the result of calling
+  // nsIRunnablePriority::GetPriority(). *aDelay is the time the event has
+  // already been delayed (used when moving an event from one queue to
+  // another).
+ void PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority, const MutexAutoLock& aProofOfLock,
+ mozilla::TimeDuration* aDelay = nullptr);
+
+ // Get an event from the front of the queue. This should return null if the
+ // queue is non-empty but the event in front is not ready to run.
+ // *aLastEventDelay is the time the event spent in queues before being
+ // retrieved.
+ already_AddRefed<nsIRunnable> GetEvent(
+ const MutexAutoLock& aProofOfLock,
+ mozilla::TimeDuration* aLastEventDelay = nullptr);
+
+ // Returns true if the queue is empty. Implies !HasReadyEvent().
+ bool IsEmpty(const MutexAutoLock& aProofOfLock);
+
+ // Returns true if the queue is non-empty and if the event in front is ready
+ // to run. Implies !IsEmpty(). This should return true iff GetEvent returns a
+ // non-null value.
+ bool HasReadyEvent(const MutexAutoLock& aProofOfLock);
+
+ // Returns the number of events in the queue.
+ size_t Count(const MutexAutoLock& aProofOfLock) const;
+ // For some reason, if we put this in the .cpp file the linker can't find it
+ already_AddRefed<nsIRunnable> PeekEvent(const MutexAutoLock& aProofOfLock) {
+ if (mQueue.IsEmpty()) {
+ return nullptr;
+ }
+
+ nsCOMPtr<nsIRunnable> result = mQueue.FirstElement();
+ return result.forget();
+ }
+
+ void EnableInputEventPrioritization(const MutexAutoLock& aProofOfLock) {}
+ void FlushInputEventPrioritization(const MutexAutoLock& aProofOfLock) {}
+ void SuspendInputEventPrioritization(const MutexAutoLock& aProofOfLock) {}
+ void ResumeInputEventPrioritization(const MutexAutoLock& aProofOfLock) {}
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ size_t size = mQueue.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ size += mDispatchTimes.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ return size;
+ }
+
+ private:
+ mozilla::Queue<nsCOMPtr<nsIRunnable>, ItemsPerPage> mQueue;
+ // This queue is only populated when the profiler is turned on.
+ mozilla::Queue<mozilla::TimeStamp, ItemsPerPage> mDispatchTimes;
+ TimeDuration mLastEventDelay;
+ // This indicates PutEvent forwards runnables to the TaskController. This
+ // should be true for the top level event queue on the main thread.
+ bool mForwardToTC;
+};
+
+} // namespace detail
+
+class EventQueue final : public mozilla::detail::EventQueueInternal<16> {
+ public:
+ explicit EventQueue(bool aForwardToTC = false)
+ : mozilla::detail::EventQueueInternal<16>(aForwardToTC) {}
+};
+
+template <size_t ItemsPerPage = 16>
+class EventQueueSized final
+ : public mozilla::detail::EventQueueInternal<ItemsPerPage> {
+ public:
+ explicit EventQueueSized(bool aForwardToTC = false)
+ : mozilla::detail::EventQueueInternal<ItemsPerPage>(aForwardToTC) {}
+};
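+
+// Illustrative sketch (an assumption, not part of this patch): the owning
+// queue wraps every call in its own mutex and passes the MutexAutoLock as
+// proof-of-lock, which is how this unsynchronized queue stays safe:
+//
+//   Mutex mutex("OwnerOfQueue");
+//   EventQueue queue(/* aForwardToTC = */ false);
+//   {
+//     MutexAutoLock lock(mutex);
+//     queue.PutEvent(event.forget(), EventQueuePriority::Normal, lock);
+//     nsCOMPtr<nsIRunnable> next = queue.GetEvent(lock);
+//   }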
+
+} // namespace mozilla
+
+#endif // mozilla_EventQueue_h
diff --git a/xpcom/threads/EventTargetCapability.h b/xpcom/threads/EventTargetCapability.h
new file mode 100644
index 0000000000..0cd85a7523
--- /dev/null
+++ b/xpcom/threads/EventTargetCapability.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef XPCOM_THREADS_EVENTTARGETCAPABILITY_H_
+#define XPCOM_THREADS_EVENTTARGETCAPABILITY_H_
+
+#include "mozilla/ThreadSafety.h"
+#include "nsIEventTarget.h"
+
+namespace mozilla {
+
+// A helper to ensure that data access and function usage only take place on a
+// specific nsIEventTarget.
+//
+// This class works with Clang's thread safety analysis so that static analysis
+// can ensure AssertOnCurrentThread is called before using guarded data and
+// functions.
+//
+// This means using the class is similar to calling
+// `MOZ_ASSERT(mTarget->IsOnCurrentThread())`
+// prior to accessing things we expect to be on `mTarget`. However, using this
+// helper has the added benefit that static analysis will warn you if you
+// fail to assert prior to usage.
+//
+// The following is a basic example of a class using this to ensure
+// a data member is only accessed on a specific target.
+//
+// class SomeMediaHandlerThing {
+// public:
+// SomeMediaHandlerThing(nsIEventTarget* aTarget) : mTargetCapability(aTarget)
+// {}
+//
+// void UpdateMediaState() {
+// mTargetCapability.Dispatch(
+// NS_NewRunnableFunction("UpdateMediaState", [this] {
+// mTargetCapability.AssertOnCurrentThread();
+// IncreaseMediaCount();
+// }));
+// }
+//
+// private:
+// void IncreaseMediaCount() MOZ_REQUIRES(mTargetCapability) { mMediaCount +=
+// 1; }
+//
+// uint32_t mMediaCount MOZ_GUARDED_BY(mTargetCapability) = 0;
+// EventTargetCapability<nsIEventTarget> mTargetCapability;
+// };
+//
+// NOTE: If you need a thread-safety capability for specifically the main
+// thread, the static `mozilla::sMainThreadCapability` capability exists, and
+// can be asserted using `AssertIsOnMainThread()`.
+
+template <typename T>
+class MOZ_CAPABILITY("event target") EventTargetCapability final {
+ static_assert(std::is_base_of_v<nsIEventTarget, T>,
+ "T must derive from nsIEventTarget");
+
+ public:
+ explicit EventTargetCapability(T* aTarget) : mTarget(aTarget) {
+ MOZ_ASSERT(mTarget, "mTarget should be non-null");
+ }
+ ~EventTargetCapability() = default;
+
+ EventTargetCapability(const EventTargetCapability&) = default;
+ EventTargetCapability(EventTargetCapability&&) = default;
+ EventTargetCapability& operator=(const EventTargetCapability&) = default;
+ EventTargetCapability& operator=(EventTargetCapability&&) = default;
+
+ void AssertOnCurrentThread() const MOZ_ASSERT_CAPABILITY(this) {
+ MOZ_ASSERT(IsOnCurrentThread());
+ }
+
+ // Allow users to check if we're on the same thread as the event target.
+ bool IsOnCurrentThread() const { return mTarget->IsOnCurrentThread(); }
+
+ // Allow users to get the event target, so classes don't have to store the
+ // target as a separate member to use it.
+ T* GetEventTarget() const { return mTarget; }
+
+ // Helper to simplify dispatching to mTarget.
+ nsresult Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t aFlags = NS_DISPATCH_NORMAL) const {
+ return mTarget->Dispatch(std::move(aRunnable), aFlags);
+ }
+
+ private:
+ RefPtr<T> mTarget;
+};
+
+} // namespace mozilla
+
+#endif // XPCOM_THREADS_EVENTTARGETCAPABILITY_H_
diff --git a/xpcom/threads/IdlePeriodState.cpp b/xpcom/threads/IdlePeriodState.cpp
new file mode 100644
index 0000000000..ca7f15321f
--- /dev/null
+++ b/xpcom/threads/IdlePeriodState.cpp
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/AppShutdown.h"
+#include "mozilla/IdlePeriodState.h"
+#include "mozilla/StaticPrefs_idle_period.h"
+#include "mozilla/ipc/IdleSchedulerChild.h"
+#include "nsIIdlePeriod.h"
+#include "nsThreadManager.h"
+#include "nsThreadUtils.h"
+#include "nsXPCOM.h"
+#include "nsXULAppAPI.h"
+
+static uint64_t sIdleRequestCounter = 0;
+
+namespace mozilla {
+
+IdlePeriodState::IdlePeriodState(already_AddRefed<nsIIdlePeriod>&& aIdlePeriod)
+ : mIdlePeriod(aIdlePeriod) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+}
+
+IdlePeriodState::~IdlePeriodState() {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+ if (mIdleScheduler) {
+ mIdleScheduler->Disconnect();
+ }
+}
+
+size_t IdlePeriodState::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+ size_t n = 0;
+ if (mIdlePeriod) {
+ n += aMallocSizeOf(mIdlePeriod);
+ }
+
+ return n;
+}
+
+void IdlePeriodState::FlagNotIdle() {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+
+ EnsureIsActive();
+ if (mIdleToken && mIdleToken < TimeStamp::Now()) {
+ ClearIdleToken();
+ }
+}
+
+void IdlePeriodState::RanOutOfTasks(const MutexAutoUnlock& aProofOfUnlock) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+ MOZ_ASSERT(!mHasPendingEventsPromisedIdleEvent);
+ EnsureIsPaused(aProofOfUnlock);
+ ClearIdleToken();
+}
+
+TimeStamp IdlePeriodState::GetIdleDeadlineInternal(
+ bool aIsPeek, const MutexAutoUnlock& aProofOfUnlock) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+
+ bool shuttingDown;
+ TimeStamp localIdleDeadline =
+ GetLocalIdleDeadline(shuttingDown, aProofOfUnlock);
+ if (!localIdleDeadline) {
+ if (!aIsPeek) {
+ EnsureIsPaused(aProofOfUnlock);
+ ClearIdleToken();
+ }
+ return TimeStamp();
+ }
+
+ TimeStamp idleDeadline =
+ mHasPendingEventsPromisedIdleEvent || shuttingDown
+ ? localIdleDeadline
+ : GetIdleToken(localIdleDeadline, aProofOfUnlock);
+ if (!idleDeadline) {
+ if (!aIsPeek) {
+ EnsureIsPaused(aProofOfUnlock);
+
+ // Don't call ClearIdleToken() here, since we may have a pending
+ // request already.
+ //
+ // RequestIdleToken can do all sorts of IPC stuff that might
+ // take mutexes. This is one reason why we need the
+ // MutexAutoUnlock reference!
+ RequestIdleToken(localIdleDeadline);
+ }
+ return TimeStamp();
+ }
+
+ if (!aIsPeek) {
+ EnsureIsActive();
+ }
+ return idleDeadline;
+}
+
+TimeStamp IdlePeriodState::GetLocalIdleDeadline(
+ bool& aShuttingDown, const MutexAutoUnlock& aProofOfUnlock) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+ // If we are shutting down, we won't honor the idle period, and we will
+ // always process idle runnables. This will ensure that the idle queue
+  // gets exhausted at shutdown time, preventing us from intermittently
+  // leaking runnables inside that queue and, even worse, from leaving
+  // important cleanup work unfinished.
+ if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownThreads) ||
+ nsThreadManager::get().GetCurrentThread()->ShuttingDown()) {
+ aShuttingDown = true;
+ return TimeStamp::Now();
+ }
+
+ aShuttingDown = false;
+ TimeStamp idleDeadline;
+ // This GetIdlePeriodHint() call is the reason we need a MutexAutoUnlock here.
+ mIdlePeriod->GetIdlePeriodHint(&idleDeadline);
+
+ // If HasPendingEvents() has been called and it has returned true because of
+ // pending idle events, there is a risk that we may decide here that we aren't
+ // idle and return null, in which case HasPendingEvents() has effectively
+ // lied. Since we can't go back and fix the past, we have to adjust what we
+ // do here and forcefully pick the idle queue task here. Note that this means
+ // that we are choosing to run a task from the idle queue when we would
+ // normally decide that we aren't in an idle period, but this can only happen
+ // if we fall out of the idle period in between the call to HasPendingEvents()
+ // and here, which should hopefully be quite rare. We are effectively
+ // choosing to prioritize the sanity of our API semantics over the optimal
+ // scheduling.
+ if (!mHasPendingEventsPromisedIdleEvent &&
+ (!idleDeadline || idleDeadline < TimeStamp::Now())) {
+ return TimeStamp();
+ }
+ if (mHasPendingEventsPromisedIdleEvent && !idleDeadline) {
+ // If HasPendingEvents() has been called and it has returned true, but we're
+ // no longer in the idle period, we must return a valid timestamp to pretend
+ // that we are still in the idle period.
+ return TimeStamp::Now();
+ }
+ return idleDeadline;
+}
+
+TimeStamp IdlePeriodState::GetIdleToken(TimeStamp aLocalIdlePeriodHint,
+ const MutexAutoUnlock& aProofOfUnlock) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+
+ if (!ShouldGetIdleToken()) {
+ return aLocalIdlePeriodHint;
+ }
+
+ if (mIdleToken) {
+ TimeStamp now = TimeStamp::Now();
+ if (mIdleToken < now) {
+ ClearIdleToken();
+ return mIdleToken;
+ }
+ return mIdleToken < aLocalIdlePeriodHint ? mIdleToken
+ : aLocalIdlePeriodHint;
+ }
+ return TimeStamp();
+}
+
+void IdlePeriodState::RequestIdleToken(TimeStamp aLocalIdlePeriodHint) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+ MOZ_ASSERT(!mActive);
+
+ if (!mIdleSchedulerInitialized) {
+ mIdleSchedulerInitialized = true;
+ if (ShouldGetIdleToken()) {
+ // For now cross-process idle scheduler is supported only on the main
+ // threads of the child processes.
+ mIdleScheduler = ipc::IdleSchedulerChild::GetMainThreadIdleScheduler();
+ if (mIdleScheduler) {
+ mIdleScheduler->Init(this);
+ }
+ }
+ }
+
+ if (mIdleScheduler && !mIdleRequestId) {
+ TimeStamp now = TimeStamp::Now();
+ if (aLocalIdlePeriodHint <= now) {
+ return;
+ }
+
+ mIdleRequestId = ++sIdleRequestCounter;
+ mIdleScheduler->SendRequestIdleTime(mIdleRequestId,
+ aLocalIdlePeriodHint - now);
+ }
+}
+
+void IdlePeriodState::SetIdleToken(uint64_t aId, TimeDuration aDuration) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+
+  // We check the request ID. It's possible that the server may be granting
+  // an earlier request that the client has since cancelled and re-requested.
+ if (mIdleRequestId == aId) {
+ mIdleToken = TimeStamp::Now() + aDuration;
+ }
+}
+
+void IdlePeriodState::SetActive() {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+ MOZ_ASSERT(!mActive);
+ if (mIdleScheduler) {
+ mIdleScheduler->SetActive();
+ }
+ mActive = true;
+}
+
+void IdlePeriodState::SetPaused(const MutexAutoUnlock& aProofOfUnlock) {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+ MOZ_ASSERT(mActive);
+ if (mIdleScheduler && mIdleScheduler->SetPaused()) {
+ // We may have gotten a free cpu core for running idle tasks.
+ // We don't try to catch the case when there are prioritized processes
+ // running.
+
+ // This SendSchedule call is why we need the MutexAutoUnlock here, because
+ // IPC can do weird things with mutexes.
+ mIdleScheduler->SendSchedule();
+ }
+ mActive = false;
+}
+
+void IdlePeriodState::ClearIdleToken() {
+ MOZ_ASSERT(NS_IsMainThread(),
+ "Why are we touching idle state off the main thread?");
+
+ if (mIdleRequestId) {
+ if (mIdleScheduler) {
+ // This SendIdleTimeUsed call is why we need to not be holding
+ // any locks here, because IPC can do weird things with mutexes.
+ // Ideally we'd have a MutexAutoUnlock& reference here, but some
+ // callers end up here while just not holding any locks at all.
+ mIdleScheduler->SendIdleTimeUsed(mIdleRequestId);
+ }
+ mIdleRequestId = 0;
+ mIdleToken = TimeStamp();
+ }
+}
+
+bool IdlePeriodState::ShouldGetIdleToken() {
+ return StaticPrefs::idle_period_cross_process_scheduling() &&
+ XRE_IsContentProcess();
+}
+} // namespace mozilla
diff --git a/xpcom/threads/IdlePeriodState.h b/xpcom/threads/IdlePeriodState.h
new file mode 100644
index 0000000000..d50fce64be
--- /dev/null
+++ b/xpcom/threads/IdlePeriodState.h
@@ -0,0 +1,201 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_IdlePeriodState_h
+#define mozilla_IdlePeriodState_h
+
+/**
+ * A class for tracking the state of our idle period. This includes keeping
+ * track of both the state of our process-local idle period estimate and, for
+ * content processes, managing communication with the parent process for
+ * cross-process idle detection.
+ */
+
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/TimeStamp.h"
+#include "nsCOMPtr.h"
+
+#include <stdint.h>
+
+class nsIIdlePeriod;
+
+namespace mozilla {
+class TaskManager;
+namespace ipc {
+class IdleSchedulerChild;
+} // namespace ipc
+
+class IdlePeriodState {
+ public:
+ explicit IdlePeriodState(already_AddRefed<nsIIdlePeriod>&& aIdlePeriod);
+
+ ~IdlePeriodState();
+
+ // Integration with memory reporting.
+ size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
+
+ // Notification that whoever we are tracking idle state for has found a
+ // non-idle task to process.
+ //
+ // Must not be called while holding any locks.
+ void FlagNotIdle();
+
+ // Notification that whoever we are tracking idle state for has no more
+ // tasks (idle or not) to process.
+ //
+ // aProofOfUnlock is the proof that our caller unlocked its mutex.
+ void RanOutOfTasks(const MutexAutoUnlock& aProofOfUnlock);
+
+ // Notification that whoever we are tracking idle state has idle tasks that
+ // they are considering ready to run and that we should keep claiming they are
+ // ready to run until they call ForgetPendingTaskGuarantee().
+ void EnforcePendingTaskGuarantee() {
+ mHasPendingEventsPromisedIdleEvent = true;
+ }
+
+ // Notification that whoever we are tracking idle state for is done with our
+ // "we have an idle event ready to run" guarantee. When this happens, we can
+ // reset mHasPendingEventsPromisedIdleEvent to false, because we have
+ // fulfilled our contract.
+ void ForgetPendingTaskGuarantee() {
+ mHasPendingEventsPromisedIdleEvent = false;
+ }
+
+ // Update our cached idle deadline so consumers can use it while holding
+ // locks. Consumers must ClearCachedIdleDeadline() once they are done.
+ void UpdateCachedIdleDeadline(const MutexAutoUnlock& aProofOfUnlock) {
+ mCachedIdleDeadline = GetIdleDeadlineInternal(false, aProofOfUnlock);
+ }
+
+ // If we have local idle deadline, but don't have an idle token, this will
+ // request such from the parent process when this is called in a child
+ // process.
+ void RequestIdleDeadlineIfNeeded(const MutexAutoUnlock& aProofOfUnlock) {
+ GetIdleDeadlineInternal(false, aProofOfUnlock);
+ }
+
+ // Reset our cached idle deadline, so we stop allowing idle runnables to run.
+ void ClearCachedIdleDeadline() { mCachedIdleDeadline = TimeStamp(); }
+
+ // Get the current cached idle deadline. This may return a null timestamp.
+ TimeStamp GetCachedIdleDeadline() { return mCachedIdleDeadline; }
+
+ // Peek our current idle deadline into mCachedIdleDeadline. This can cause
+ // mCachedIdleDeadline to be a null timestamp (which means we are not idle
+ // right now). This method does not have any side-effects on our state, apart
+ // from guaranteeing that if it returns non-null then GetDeadlineForIdleTask
+ // will return non-null until ForgetPendingTaskGuarantee() is called, and its
+ // effects on mCachedIdleDeadline.
+ //
+ // aProofOfUnlock is the proof that our caller unlocked its mutex.
+ void CachePeekedIdleDeadline(const MutexAutoUnlock& aProofOfUnlock) {
+ mCachedIdleDeadline = GetIdleDeadlineInternal(true, aProofOfUnlock);
+ }
+
+ void SetIdleToken(uint64_t aId, TimeDuration aDuration);
+
+ bool IsActive() { return mActive; }
+
+ protected:
+ void EnsureIsActive() {
+ if (!mActive) {
+ SetActive();
+ }
+ }
+
+ void EnsureIsPaused(const MutexAutoUnlock& aProofOfUnlock) {
+ if (mActive) {
+ SetPaused(aProofOfUnlock);
+ }
+ }
+
+ // Returns a null TimeStamp if we're not in the idle period.
+ TimeStamp GetLocalIdleDeadline(bool& aShuttingDown,
+ const MutexAutoUnlock& aProofOfUnlock);
+
+ // Gets the idle token, which is the end time of the idle period.
+ //
+ // aProofOfUnlock is the proof that our caller unlocked its mutex.
+ TimeStamp GetIdleToken(TimeStamp aLocalIdlePeriodHint,
+ const MutexAutoUnlock& aProofOfUnlock);
+
+ // In case of child processes, requests idle time from the cross-process
+ // idle scheduler.
+ void RequestIdleToken(TimeStamp aLocalIdlePeriodHint);
+
+ // Mark that we don't have idle time to use, nor are expecting to get an idle
+ // token from the idle scheduler. This must be called while not holding any
+ // locks, but some of the callers aren't holding locks to start with, so
+ // consumers just need to make sure they are not holding locks when they call
+ // this.
+ void ClearIdleToken();
+
+ // SetActive should be called when the event queue is running any type of
+ // tasks.
+ void SetActive();
+ // SetPaused should be called once the event queue doesn't have more
+ // tasks to process, or is waiting for the idle token.
+ //
+ // aProofOfUnlock is the proof that our caller unlocked its mutex.
+ void SetPaused(const MutexAutoUnlock& aProofOfUnlock);
+
+ // Get or peek our idle deadline. When peeking, we generally don't change any
+ // of our internal state. When getting, we may request an idle token as
+ // needed.
+ //
+ // aProofOfUnlock is the proof that our caller unlocked its mutex.
+ TimeStamp GetIdleDeadlineInternal(bool aIsPeek,
+ const MutexAutoUnlock& aProofOfUnlock);
+
+ // Whether we should be getting an idle token (i.e. are a content process
+ // and are using cross process idle scheduling).
+ bool ShouldGetIdleToken();
+
+ // Set to true if we have claimed we have a ready-to-run idle task when asked.
+ // In that case, we will ensure that we allow at least one task to run when
+ // someone tries to run a task, even if we have run out of idle period at that
+ // point. This ensures that we never fail to produce a task to run if we
+ // claim we have a task ready to run.
+ bool mHasPendingEventsPromisedIdleEvent = false;
+
+ // mIdlePeriod keeps track of the current idle period. Calling
+ // mIdlePeriod->GetIdlePeriodHint() will give an estimate of when
+ // the current idle period will end.
+ nsCOMPtr<nsIIdlePeriod> mIdlePeriod;
+
+ // If non-null, this timestamp represents the end time of the idle period. An
+ // idle period starts when we get the idle token from the parent process and
+ // ends when either there are no more things we want to run at idle priority
+ // or mIdleToken < TimeStamp::Now(), so we have reached our idle deadline.
+ TimeStamp mIdleToken;
+
+ // The id of the last idle request to the cross-process idle scheduler.
+ uint64_t mIdleRequestId = 0;
+
+ // If we're in a content process, we use mIdleScheduler to communicate with
+ // the parent process for purposes of cross-process idle tracking.
+ RefPtr<mozilla::ipc::IdleSchedulerChild> mIdleScheduler;
+
+ // Our cached idle deadline. This is set by UpdateCachedIdleDeadline() and
+ // cleared by ClearCachedIdleDeadline(). Consumers should do the former while
+ // not holding any locks, but may do the latter while holding locks.
+ TimeStamp mCachedIdleDeadline;
+
+ // mIdleSchedulerInitialized is true if our mIdleScheduler has been
+  // initialized. mIdleScheduler may still be null even after initialization,
+  // in various situations.
+ bool mIdleSchedulerInitialized = false;
+
+ // mActive is true when the PrioritizedEventQueue or TaskController we are
+ // associated with is running tasks.
+ bool mActive = true;
+};
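+
+// Illustrative consumer sketch (an assumption; the real consumers live in
+// the event queue machinery): the cached-deadline dance exists so that the
+// deadline can be computed while unlocked but consulted while locked:
+//
+//   {
+//     MutexAutoUnlock unlock(mMutex);
+//     mIdlePeriodState.UpdateCachedIdleDeadline(unlock);
+//   }
+//   // Re-locked: decide whether to run an idle runnable.
+//   TimeStamp deadline = mIdlePeriodState.GetCachedIdleDeadline();
+//   // ...run or skip the idle task, then:
+//   mIdlePeriodState.ClearCachedIdleDeadline();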
+
+} // namespace mozilla
+
+#endif // mozilla_IdlePeriodState_h
diff --git a/xpcom/threads/IdleTaskRunner.cpp b/xpcom/threads/IdleTaskRunner.cpp
new file mode 100644
index 0000000000..8b467c54d6
--- /dev/null
+++ b/xpcom/threads/IdleTaskRunner.cpp
@@ -0,0 +1,280 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "IdleTaskRunner.h"
+#include "mozilla/TaskCategory.h"
+#include "mozilla/TaskController.h"
+#include "nsRefreshDriver.h"
+
+namespace mozilla {
+
+already_AddRefed<IdleTaskRunner> IdleTaskRunner::Create(
+ const CallbackType& aCallback, const char* aRunnableName,
+ TimeDuration aStartDelay, TimeDuration aMaxDelay,
+ TimeDuration aMinimumUsefulBudget, bool aRepeating,
+ const MayStopProcessingCallbackType& aMayStopProcessing,
+ const RequestInterruptCallbackType& aRequestInterrupt) {
+ if (aMayStopProcessing && aMayStopProcessing()) {
+ return nullptr;
+ }
+
+ RefPtr<IdleTaskRunner> runner = new IdleTaskRunner(
+ aCallback, aRunnableName, aStartDelay, aMaxDelay, aMinimumUsefulBudget,
+ aRepeating, aMayStopProcessing, aRequestInterrupt);
+ runner->Schedule(false); // Initial scheduling shouldn't use idle dispatch.
+ return runner.forget();
+}
+
+class IdleTaskRunnerTask : public Task {
+ public:
+ explicit IdleTaskRunnerTask(IdleTaskRunner* aRunner)
+ : Task(true, EventQueuePriority::Idle),
+ mRunner(aRunner),
+ mRequestInterrupt(aRunner->mRequestInterrupt) {
+ SetManager(TaskController::Get()->GetIdleTaskManager());
+ }
+
+ bool Run() override {
+ if (mRunner) {
+      // IdleTaskRunner::Run can actually trigger the destruction of the
+      // IdleTaskRunner. Make sure it doesn't get destroyed before the method
+      // finishes.
+ RefPtr<IdleTaskRunner> runner(mRunner);
+ runner->Run();
+ }
+ return true;
+ }
+
+ void SetIdleDeadline(TimeStamp aDeadline) override {
+ if (mRunner) {
+ mRunner->SetIdleDeadline(aDeadline);
+ }
+ }
+
+ void Cancel() { mRunner = nullptr; }
+
+ bool GetName(nsACString& aName) override {
+ if (mRunner) {
+ aName.Assign(mRunner->GetName());
+ } else {
+ aName.Assign("ExpiredIdleTaskRunner");
+ }
+ return true;
+ }
+
+ void RequestInterrupt(uint32_t aInterruptPriority) override {
+ if (mRequestInterrupt) {
+ mRequestInterrupt(aInterruptPriority);
+ }
+ }
+
+ private:
+ IdleTaskRunner* mRunner;
+
+ // Copied here and invoked even if there is no mRunner currently, to avoid
+ // race conditions checking mRunner when an interrupt is requested.
+ IdleTaskRunner::RequestInterruptCallbackType mRequestInterrupt;
+};
+
+IdleTaskRunner::IdleTaskRunner(
+ const CallbackType& aCallback, const char* aRunnableName,
+ TimeDuration aStartDelay, TimeDuration aMaxDelay,
+ TimeDuration aMinimumUsefulBudget, bool aRepeating,
+ const MayStopProcessingCallbackType& aMayStopProcessing,
+ const RequestInterruptCallbackType& aRequestInterrupt)
+ : mCallback(aCallback),
+ mStartTime(TimeStamp::Now() + aStartDelay),
+ mMaxDelay(aMaxDelay),
+ mMinimumUsefulBudget(aMinimumUsefulBudget),
+ mRepeating(aRepeating),
+ mTimerActive(false),
+ mMayStopProcessing(aMayStopProcessing),
+ mRequestInterrupt(aRequestInterrupt),
+ mName(aRunnableName) {}
+
+void IdleTaskRunner::Run() {
+ if (!mCallback) {
+ return;
+ }
+
+ // Deadline is null when called from timer or RunNextCollectorTimer rather
+ // than during idle time.
+ TimeStamp now = TimeStamp::Now();
+
+ // Note that if called from RunNextCollectorTimer, we may not have reached
+ // mStartTime yet. Pretend we are overdue for idle time.
+ bool overdueForIdle = mDeadline.IsNull();
+ bool didRun = false;
+ bool allowIdleDispatch = false;
+
+ if (mTask) {
+ // If we find ourselves here we should usually be running from this task,
+ // but there are exceptions. In any case we're doing the work now and don't
+ // need our task going forward unless we're re-scheduled.
+ nsRefreshDriver::CancelIdleTask(mTask);
+ // Extra safety, make sure a task can never have a dangling ptr.
+ mTask->Cancel();
+ mTask = nullptr;
+ }
+
+ if (overdueForIdle || ((now + mMinimumUsefulBudget) < mDeadline)) {
+ CancelTimer();
+ didRun = mCallback(mDeadline);
+ // If we didn't do meaningful work, don't schedule using immediate
+ // idle dispatch, since that could lead to a loop until the idle
+ // period ends.
+ allowIdleDispatch = didRun;
+ } else if (now >= mDeadline) {
+ allowIdleDispatch = true;
+ }
+
+ if (mCallback && (mRepeating || !didRun)) {
+ Schedule(allowIdleDispatch);
+ } else {
+ mCallback = nullptr;
+ }
+}
+
+static void TimedOut(nsITimer* aTimer, void* aClosure) {
+ RefPtr<IdleTaskRunner> runner = static_cast<IdleTaskRunner*>(aClosure);
+ runner->Run();
+}
+
+void IdleTaskRunner::SetIdleDeadline(mozilla::TimeStamp aDeadline) {
+ mDeadline = aDeadline;
+}
+
+void IdleTaskRunner::SetMinimumUsefulBudget(int64_t aMinimumUsefulBudget) {
+ mMinimumUsefulBudget = TimeDuration::FromMilliseconds(aMinimumUsefulBudget);
+}
+
+void IdleTaskRunner::SetTimer(TimeDuration aDelay, nsIEventTarget* aTarget) {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aTarget->IsOnCurrentThread());
+  // aTarget is always the main thread event target provided from
+  // NS_DispatchToCurrentThreadQueue(). We ignore aTarget here to ensure that
+  // the IdleTaskRunner always runs specifically on the main thread.
+ SetTimerInternal(aDelay);
+}
+
+void IdleTaskRunner::Cancel() {
+ CancelTimer();
+ mTimer = nullptr;
+ mScheduleTimer = nullptr;
+ mCallback = nullptr;
+}
+
+static void ScheduleTimedOut(nsITimer* aTimer, void* aClosure) {
+ RefPtr<IdleTaskRunner> runnable = static_cast<IdleTaskRunner*>(aClosure);
+ runnable->Schedule(true);
+}
+
+void IdleTaskRunner::Schedule(bool aAllowIdleDispatch) {
+ if (!mCallback) {
+ return;
+ }
+
+ if (mMayStopProcessing && mMayStopProcessing()) {
+ Cancel();
+ return;
+ }
+
+ mDeadline = TimeStamp();
+
+ TimeStamp now = TimeStamp::Now();
+ bool useRefreshDriver = false;
+ if (now >= mStartTime) {
+ // Detect whether the refresh driver is ticking by checking if
+ // GetIdleDeadlineHint returns its input parameter.
+ useRefreshDriver =
+ (nsRefreshDriver::GetIdleDeadlineHint(
+ now, nsRefreshDriver::IdleCheck::OnlyThisProcessRefreshDriver) !=
+ now);
+ } else {
+ NS_WARNING_ASSERTION(!aAllowIdleDispatch,
+ "early callback, or time went backwards");
+ }
+
+ if (useRefreshDriver) {
+ if (!mTask) {
+ // If a task was already scheduled, no point rescheduling.
+ mTask = new IdleTaskRunnerTask(this);
+ // RefreshDriver is ticking, let it schedule the idle dispatch.
+ nsRefreshDriver::DispatchIdleTaskAfterTickUnlessExists(mTask);
+ }
+ // Ensure we get called at some point, even if RefreshDriver is stopped.
+ SetTimerInternal(mMaxDelay);
+ } else {
+ // RefreshDriver doesn't seem to be running.
+ if (aAllowIdleDispatch) {
+ SetTimerInternal(mMaxDelay);
+ if (!mTask) {
+ // If we have mTask we've already scheduled one, and the refresh driver
+ // shouldn't be running if we hit this code path.
+ mTask = new IdleTaskRunnerTask(this);
+ RefPtr<Task> task(mTask);
+ TaskController::Get()->AddTask(task.forget());
+ }
+ } else {
+ if (!mScheduleTimer) {
+ mScheduleTimer = NS_NewTimer();
+ if (!mScheduleTimer) {
+ return;
+ }
+ } else {
+ mScheduleTimer->Cancel();
+ }
+ // We weren't allowed to do idle dispatch immediately, do it after a
+ // short timeout. (Or wait for our start time if we haven't started yet.)
+ uint32_t waitToSchedule = 16; /* ms */
+ if (now < mStartTime) {
+ // + 1 to round milliseconds up to be sure to wait until after
+ // mStartTime.
+ waitToSchedule = (mStartTime - now).ToMilliseconds() + 1;
+ }
+ mScheduleTimer->InitWithNamedFuncCallback(
+ ScheduleTimedOut, this, waitToSchedule,
+ nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY, mName);
+ }
+ }
+}
+
+IdleTaskRunner::~IdleTaskRunner() { CancelTimer(); }
+
+void IdleTaskRunner::CancelTimer() {
+ if (mTask) {
+ nsRefreshDriver::CancelIdleTask(mTask);
+ mTask->Cancel();
+ mTask = nullptr;
+ }
+ if (mTimer) {
+ mTimer->Cancel();
+ }
+ if (mScheduleTimer) {
+ mScheduleTimer->Cancel();
+ }
+ mTimerActive = false;
+}
+
+void IdleTaskRunner::SetTimerInternal(TimeDuration aDelay) {
+ if (mTimerActive) {
+ return;
+ }
+
+ if (!mTimer) {
+ mTimer = NS_NewTimer();
+ } else {
+ mTimer->Cancel();
+ }
+
+ if (mTimer) {
+ mTimer->InitWithNamedFuncCallback(TimedOut, this, aDelay.ToMilliseconds(),
+ nsITimer::TYPE_ONE_SHOT, mName);
+ mTimerActive = true;
+ }
+}
+
+} // end of namespace mozilla
diff --git a/xpcom/threads/IdleTaskRunner.h b/xpcom/threads/IdleTaskRunner.h
new file mode 100644
index 0000000000..4052021f08
--- /dev/null
+++ b/xpcom/threads/IdleTaskRunner.h
@@ -0,0 +1,122 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef IdleTaskRunner_h
+#define IdleTaskRunner_h
+
+#include "mozilla/TimeStamp.h"
+#include "nsIEventTarget.h"
+#include "nsISupports.h"
+#include "nsITimer.h"
+#include <functional>
+
+namespace mozilla {
+
+class IdleTaskRunnerTask;
+
+// A general purpose repeating callback runner (it can be configured as a
+// one-time runner, too). If it is running repeatedly, one has to either
+// explicitly Cancel() the runner or have the MayStopProcessing() callback
+// return true to completely remove the runner.
+class IdleTaskRunner {
+ public:
+ friend class IdleTaskRunnerTask;
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(IdleTaskRunner)
+
+ // Return true if some meaningful work was done.
+ using CallbackType = std::function<bool(TimeStamp aDeadline)>;
+
+  // A callback for the "stop processing" decision. Return true to
+  // stop processing. This can be used as an alternative to Cancel(), or
+  // the two can work together in different ways.
+ using MayStopProcessingCallbackType = std::function<bool()>;
+
+ // A callback to be invoked when an interrupt is requested
+  // (e.g. during an idle activity when the user presses a key).
+ // The callback takes an "interrupt priority" value as its
+ // sole parameter.
+ using RequestInterruptCallbackType = std::function<void(uint32_t)>;
+
+ public:
+ // An IdleTaskRunner has (up to) three phases:
+ //
+ // - (duration aStartDelay) waiting to run (aStartDelay can be zero)
+ //
+ // - (duration aMaxDelay) attempting to find a long enough amount of idle
+ // time, at least aMinimumUsefulBudget
+ //
+ // - overdue for idle time, run as soon as possible
+ //
+ // If aRepeating is true, then aStartDelay applies only to the first run; the
+ // second run will attempt to run in the first idle slice that is long
+ // enough.
+ //
+  // Durations are TimeDuration values, not raw milliseconds.
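+  //
+  // A minimal usage sketch (illustrative; DoSomeWork is a hypothetical
+  // callback that performs a slice of work before aDeadline and returns
+  // whether anything meaningful was done):
+  //
+  //   RefPtr<IdleTaskRunner> runner = IdleTaskRunner::Create(
+  //       [](TimeStamp aDeadline) { return DoSomeWork(aDeadline); },
+  //       "ExampleIdleTask",
+  //       TimeDuration(),                        // no start delay
+  //       TimeDuration::FromMilliseconds(1000),  // wait at most 1s for idle
+  //       TimeDuration::FromMilliseconds(3),     // need >= 3ms of idle time
+  //       /* aRepeating = */ true,
+  //       [] { return false; });                 // never asks to stop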
+ //
+ static already_AddRefed<IdleTaskRunner> Create(
+ const CallbackType& aCallback, const char* aRunnableName,
+ TimeDuration aStartDelay, TimeDuration aMaxDelay,
+ TimeDuration aMinimumUsefulBudget, bool aRepeating,
+ const MayStopProcessingCallbackType& aMayStopProcessing,
+ const RequestInterruptCallbackType& aRequestInterrupt = nullptr);
+
+ void Run();
+
+ // (Used by the task triggering code.) Record the end of the current idle
+ // period, or null if not running during idle time.
+ void SetIdleDeadline(mozilla::TimeStamp aDeadline);
+
+ void SetTimer(TimeDuration aDelay, nsIEventTarget* aTarget);
+
+ // Update the minimum idle time that this callback would be invoked for.
+ void SetMinimumUsefulBudget(int64_t aMinimumUsefulBudget);
+
+ void Cancel();
+
+ void Schedule(bool aAllowIdleDispatch);
+
+ const char* GetName() { return mName; }
+
+ private:
+ explicit IdleTaskRunner(
+ const CallbackType& aCallback, const char* aRunnableName,
+ TimeDuration aStartDelay, TimeDuration aMaxDelay,
+ TimeDuration aMinimumUsefulBudget, bool aRepeating,
+ const MayStopProcessingCallbackType& aMayStopProcessing,
+ const RequestInterruptCallbackType& aRequestInterrupt);
+ ~IdleTaskRunner();
+ void CancelTimer();
+ void SetTimerInternal(TimeDuration aDelay);
+
+ nsCOMPtr<nsITimer> mTimer;
+ nsCOMPtr<nsITimer> mScheduleTimer;
+ CallbackType mCallback;
+
+ // Do not run until this time.
+ const mozilla::TimeStamp mStartTime;
+
+ // Wait this long for idle time before giving up and running a non-idle
+ // callback.
+ TimeDuration mMaxDelay;
+
+  // If running during idle time, the expected end of the current idle period.
+  // Null when the run is triggered by mMaxDelay instead of idle time.
+ TimeStamp mDeadline;
+
+ // The least duration worth calling the callback for during idle time.
+ TimeDuration mMinimumUsefulBudget;
+
+ bool mRepeating;
+ bool mTimerActive;
+ MayStopProcessingCallbackType mMayStopProcessing;
+ RequestInterruptCallbackType mRequestInterrupt;
+ const char* mName;
+ RefPtr<IdleTaskRunnerTask> mTask;
+};
+
+} // end of namespace mozilla.
+
+#endif
diff --git a/xpcom/threads/InputTaskManager.cpp b/xpcom/threads/InputTaskManager.cpp
new file mode 100644
index 0000000000..42b2814290
--- /dev/null
+++ b/xpcom/threads/InputTaskManager.cpp
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "InputTaskManager.h"
+#include "VsyncTaskManager.h"
+#include "nsRefreshDriver.h"
+
+namespace mozilla {
+
+StaticRefPtr<InputTaskManager> InputTaskManager::gInputTaskManager;
+
+void InputTaskManager::EnableInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(mInputQueueState == STATE_DISABLED);
+ mInputQueueState = STATE_ENABLED;
+}
+
+void InputTaskManager::FlushInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(mInputQueueState == STATE_ENABLED ||
+ mInputQueueState == STATE_SUSPEND);
+ mInputQueueState =
+ mInputQueueState == STATE_ENABLED ? STATE_FLUSHING : STATE_SUSPEND;
+}
+
+void InputTaskManager::SuspendInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(mInputQueueState == STATE_ENABLED ||
+ mInputQueueState == STATE_FLUSHING);
+ mInputQueueState = STATE_SUSPEND;
+}
+
+void InputTaskManager::ResumeInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(mInputQueueState == STATE_SUSPEND);
+ mInputQueueState = STATE_ENABLED;
+}
+
+int32_t InputTaskManager::GetPriorityModifierForEventLoopTurn(
+ const MutexAutoLock& aProofOfLock) {
+  // When the state is disabled, the input task that we have is
+  // very likely SuspendInputEventQueue, so here we also use
+  // normal priority: ResumeInputEventQueue, FlushInputEventQueue and
+  // SetInputEventQueueEnabled all use normal priority, which
+  // ensures the ordering is correct. The returned modifier is relative to
+  // EventQueuePriority::InputHigh, the base priority of input tasks.
+ if (State() == InputTaskManager::STATE_DISABLED) {
+ return static_cast<int32_t>(EventQueuePriority::Normal) -
+ static_cast<int32_t>(EventQueuePriority::InputHigh);
+ }
+
+ return GetPriorityModifierForEventLoopTurnForStrictVsyncAlignment();
+}
+
+void InputTaskManager::WillRunTask() {
+ TaskManager::WillRunTask();
+ mInputPriorityController.WillRunTask();
+}
+
+int32_t
+InputTaskManager::GetPriorityModifierForEventLoopTurnForStrictVsyncAlignment() {
+ MOZ_ASSERT(!IsSuspended());
+
+ size_t inputCount = PendingTaskCount();
+ if (inputCount > 0 &&
+ mInputPriorityController.ShouldUseHighestPriority(this)) {
+ return static_cast<int32_t>(EventQueuePriority::InputHighest) -
+ static_cast<int32_t>(EventQueuePriority::InputHigh);
+ }
+
+ if (State() == STATE_FLUSHING ||
+ nsRefreshDriver::GetNextTickHint().isNothing()) {
+ return 0;
+ }
+
+ return static_cast<int32_t>(EventQueuePriority::InputLow) -
+ static_cast<int32_t>(EventQueuePriority::InputHigh);
+}
+
+InputTaskManager::InputPriorityController::InputPriorityController()
+ : mInputVsyncState(InputVsyncState::NoPendingVsync) {}
+
+bool InputTaskManager::InputPriorityController::ShouldUseHighestPriority(
+ InputTaskManager* aInputTaskManager) {
+ if (mInputVsyncState == InputVsyncState::HasPendingVsync) {
+ return true;
+ }
+
+ if (mInputVsyncState == InputVsyncState::RunVsync) {
+ return false;
+ }
+
+ if (mInputVsyncState == InputVsyncState::NoPendingVsync &&
+ VsyncTaskManager::Get()->PendingTaskCount()) {
+ EnterPendingVsyncState(aInputTaskManager->PendingTaskCount());
+ return true;
+ }
+
+ return false;
+}
+
+void InputTaskManager::InputPriorityController::EnterPendingVsyncState(
+ uint32_t aNumPendingTasks) {
+ MOZ_ASSERT(mInputVsyncState == InputVsyncState::NoPendingVsync);
+
+ mInputVsyncState = InputVsyncState::HasPendingVsync;
+ mMaxInputTasksToRun = aNumPendingTasks;
+ mRunInputStartTime = TimeStamp::Now();
+}
+
+void InputTaskManager::InputPriorityController::WillRunVsync() {
+ if (mInputVsyncState == InputVsyncState::RunVsync ||
+ mInputVsyncState == InputVsyncState::HasPendingVsync) {
+ LeavePendingVsyncState(false);
+ }
+}
+
+void InputTaskManager::InputPriorityController::LeavePendingVsyncState(
+ bool aRunVsync) {
+ if (aRunVsync) {
+ MOZ_ASSERT(mInputVsyncState == InputVsyncState::HasPendingVsync);
+ mInputVsyncState = InputVsyncState::RunVsync;
+ } else {
+ mInputVsyncState = InputVsyncState::NoPendingVsync;
+ }
+
+ mMaxInputTasksToRun = 0;
+}
+
+void InputTaskManager::InputPriorityController::WillRunTask() {
+ switch (mInputVsyncState) {
+ case InputVsyncState::NoPendingVsync:
+ return;
+ case InputVsyncState::HasPendingVsync:
+ MOZ_ASSERT(mMaxInputTasksToRun > 0);
+ --mMaxInputTasksToRun;
+ if (!mMaxInputTasksToRun ||
+ TimeStamp::Now() - mRunInputStartTime >=
+ TimeDuration::FromMilliseconds(
+ StaticPrefs::dom_input_event_queue_duration_max())) {
+ LeavePendingVsyncState(true);
+ }
+ return;
+ default:
+      MOZ_DIAGNOSTIC_ASSERT(false,
+                            "Shouldn't run this input task when we're "
+                            "supposed to run vsync");
+ return;
+ }
+}
+
+// static
+void InputTaskManager::Init() { gInputTaskManager = new InputTaskManager(); }
+
+} // namespace mozilla
diff --git a/xpcom/threads/InputTaskManager.h b/xpcom/threads/InputTaskManager.h
new file mode 100644
index 0000000000..2f920a31ae
--- /dev/null
+++ b/xpcom/threads/InputTaskManager.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_InputTaskManager_h
+#define mozilla_InputTaskManager_h
+
+#include "nsTArray.h"
+#include "nsXULAppAPI.h"
+#include "TaskController.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/StaticPrefs_dom.h"
+
+namespace mozilla {
+
+class InputTaskManager : public TaskManager {
+ public:
+ int32_t GetPriorityModifierForEventLoopTurn(
+ const MutexAutoLock& aProofOfLock) final;
+ void WillRunTask() final;
+
+ enum InputEventQueueState {
+ STATE_DISABLED,
+ STATE_FLUSHING,
+ STATE_SUSPEND,
+ STATE_ENABLED
+ };
+
+ void EnableInputEventPrioritization();
+ void FlushInputEventPrioritization();
+ void SuspendInputEventPrioritization();
+ void ResumeInputEventPrioritization();
+
+ InputEventQueueState State() { return mInputQueueState; }
+
+ void SetState(InputEventQueueState aState) { mInputQueueState = aState; }
+
+ static InputTaskManager* Get() { return gInputTaskManager.get(); }
+ static void Cleanup() { gInputTaskManager = nullptr; }
+ static void Init();
+
+ bool IsSuspended(const MutexAutoLock& aProofOfLock) override {
+ MOZ_ASSERT(NS_IsMainThread());
+ return mInputQueueState == STATE_SUSPEND || mSuspensionLevel > 0;
+ }
+
+ bool IsSuspended() {
+ MOZ_ASSERT(NS_IsMainThread());
+ return mSuspensionLevel > 0;
+ }
+
+ void IncSuspensionLevel() {
+ MOZ_ASSERT(NS_IsMainThread());
+ ++mSuspensionLevel;
+ }
+
+ void DecSuspensionLevel() {
+ MOZ_ASSERT(NS_IsMainThread());
+ --mSuspensionLevel;
+ }
+
+ static bool CanSuspendInputEvent() {
+    // Ensure this is a content process, because InputTaskManager only
+    // works in e10s.
+    //
+    // Input tasks will have nullptr as their task manager when the
+    // event queue state is STATE_DISABLED, so in that case we can't suspend
+    // input events.
+ return XRE_IsContentProcess() &&
+ StaticPrefs::dom_input_events_canSuspendInBCG_enabled() &&
+ InputTaskManager::Get()->State() !=
+ InputEventQueueState::STATE_DISABLED;
+ }
+
+ void NotifyVsync() { mInputPriorityController.WillRunVsync(); }
+
+ private:
+ InputTaskManager() : mInputQueueState(STATE_DISABLED) {}
+
+ class InputPriorityController {
+ public:
+ InputPriorityController();
+    // Determines whether we should use the highest input priority for input
+    // tasks.
+ bool ShouldUseHighestPriority(InputTaskManager*);
+
+ void WillRunVsync();
+
+    // Gets called when an input task is going to run. If the current
+    // input vsync state is `HasPendingVsync`, determines whether we
+    // should continue running input tasks or leave the `HasPendingVsync`
+    // state, based on
+    // 1. whether we still have time to process input tasks, and
+    // 2. whether we have processed the max number of tasks that
+    //    we should process.
+ void WillRunTask();
+
+ private:
+    // Used to represent the relationship between Input and Vsync.
+ //
+ // HasPendingVsync: There are pending vsync tasks and we are using
+ // InputHighest priority for inputs.
+ // NoPendingVsync: No pending vsync tasks and no need to use InputHighest
+ // priority.
+ // RunVsync: Finished running input tasks and the vsync task
+ // should be run.
+ enum class InputVsyncState {
+ HasPendingVsync,
+ NoPendingVsync,
+ RunVsync,
+ };
+
+ void EnterPendingVsyncState(uint32_t aNumPendingTasks);
+ void LeavePendingVsyncState(bool aRunVsync);
+
+ // Stores the number of pending input tasks when we enter the
+ // InputVsyncState::HasPendingVsync state.
+ uint32_t mMaxInputTasksToRun = 0;
+
+ InputVsyncState mInputVsyncState;
+
+ TimeStamp mRunInputStartTime;
+ };
+
+ int32_t GetPriorityModifierForEventLoopTurnForStrictVsyncAlignment();
+
+ Atomic<InputEventQueueState> mInputQueueState;
+
+ static StaticRefPtr<InputTaskManager> gInputTaskManager;
+
+  // Number of browsing context groups (BCGs) that have asked
+  // InputTaskManager to suspend input events.
+ uint32_t mSuspensionLevel = 0;
+
+ InputPriorityController mInputPriorityController;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_InputTaskManager_h
diff --git a/xpcom/threads/LazyIdleThread.cpp b/xpcom/threads/LazyIdleThread.cpp
new file mode 100644
index 0000000000..4187fcc4ff
--- /dev/null
+++ b/xpcom/threads/LazyIdleThread.cpp
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "LazyIdleThread.h"
+
+#include "nsIObserverService.h"
+#include "nsServiceManagerUtils.h"
+#include "nsThreadUtils.h"
+
+#ifdef DEBUG
+# define ASSERT_OWNING_THREAD() \
+ do { \
+ MOZ_ASSERT(mOwningEventTarget->IsOnCurrentThread()); \
+ } while (0)
+#else
+# define ASSERT_OWNING_THREAD() /* nothing */
+#endif
+
+namespace mozilla {
+
+LazyIdleThread::LazyIdleThread(uint32_t aIdleTimeoutMS, const char* aName,
+ ShutdownMethod aShutdownMethod)
+ : mOwningEventTarget(GetCurrentSerialEventTarget()),
+ mThreadPool(new nsThreadPool()),
+ mTaskQueue(TaskQueue::Create(do_AddRef(mThreadPool), aName)) {
+ // Configure the threadpool to host a single thread. It will be responsible
+ // for managing the thread's lifetime.
+ MOZ_ALWAYS_SUCCEEDS(mThreadPool->SetThreadLimit(1));
+ MOZ_ALWAYS_SUCCEEDS(mThreadPool->SetIdleThreadLimit(1));
+ MOZ_ALWAYS_SUCCEEDS(mThreadPool->SetIdleThreadTimeout(aIdleTimeoutMS));
+ MOZ_ALWAYS_SUCCEEDS(mThreadPool->SetName(nsDependentCString(aName)));
+
+ if (aShutdownMethod == ShutdownMethod::AutomaticShutdown &&
+ NS_IsMainThread()) {
+ if (nsCOMPtr<nsIObserverService> obs =
+ do_GetService(NS_OBSERVERSERVICE_CONTRACTID)) {
+ MOZ_ALWAYS_SUCCEEDS(
+ obs->AddObserver(this, "xpcom-shutdown-threads", false));
+ }
+ }
+}
+
+static void LazyIdleThreadShutdown(nsThreadPool* aThreadPool,
+ TaskQueue* aTaskQueue) {
+ aTaskQueue->BeginShutdown();
+ aTaskQueue->AwaitShutdownAndIdle();
+ aThreadPool->Shutdown();
+}
+
+LazyIdleThread::~LazyIdleThread() {
+ if (!mShutdown) {
+ mOwningEventTarget->Dispatch(NS_NewRunnableFunction(
+ "LazyIdleThread::~LazyIdleThread",
+ [threadPool = mThreadPool, taskQueue = mTaskQueue] {
+ LazyIdleThreadShutdown(threadPool, taskQueue);
+ }));
+ }
+}
+
+void LazyIdleThread::Shutdown() {
+ ASSERT_OWNING_THREAD();
+
+ if (!mShutdown) {
+ mShutdown = true;
+ LazyIdleThreadShutdown(mThreadPool, mTaskQueue);
+ }
+}
+
+nsresult LazyIdleThread::SetListener(nsIThreadPoolListener* aListener) {
+ return mThreadPool->SetListener(aListener);
+}
+
+NS_IMPL_ISUPPORTS(LazyIdleThread, nsIEventTarget, nsISerialEventTarget,
+ nsIObserver)
+
+NS_IMETHODIMP
+LazyIdleThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+LazyIdleThread::Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) {
+ return mTaskQueue->Dispatch(std::move(aEvent), aFlags);
+}
+
+NS_IMETHODIMP
+LazyIdleThread::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::IsOnCurrentThread(bool* aIsOnCurrentThread) {
+ return mTaskQueue->IsOnCurrentThread(aIsOnCurrentThread);
+}
+
+NS_IMETHODIMP_(bool)
+LazyIdleThread::IsOnCurrentThreadInfallible() {
+ return mTaskQueue->IsOnCurrentThreadInfallible();
+}
+
+NS_IMETHODIMP
+LazyIdleThread::Observe(nsISupports* /* aSubject */, const char* aTopic,
+ const char16_t* /* aData */) {
+ MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!");
+ MOZ_ASSERT(!strcmp("xpcom-shutdown-threads", aTopic), "Bad topic!");
+
+ Shutdown();
+ return NS_OK;
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/LazyIdleThread.h b/xpcom/threads/LazyIdleThread.h
new file mode 100644
index 0000000000..8a720bc69b
--- /dev/null
+++ b/xpcom/threads/LazyIdleThread.h
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_lazyidlethread_h__
+#define mozilla_lazyidlethread_h__
+
+#ifndef MOZILLA_INTERNAL_API
+# error "This header is only usable from within libxul (MOZILLA_INTERNAL_API)."
+#endif
+
+#include "mozilla/TaskQueue.h"
+#include "nsIObserver.h"
+#include "nsThreadPool.h"
+
+namespace mozilla {
+
+/**
+ * This class provides a basic event target that creates its thread lazily and
+ * destroys its thread after a period of inactivity. It may be created and used
+ * on any thread but it may only be shut down from the thread on which it is
+ * created.
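+ *
+ * A minimal usage sketch (illustrative):
+ *
+ *   // Tear the backing thread down after 3000ms of inactivity.
+ *   RefPtr<LazyIdleThread> lazyThread =
+ *       new LazyIdleThread(3000, "ExampleLazy");
+ *   lazyThread->Dispatch(NS_NewRunnableFunction("ExampleTask", [] {
+ *     // Runs on the lazily-created thread; the thread is re-created on
+ *     // the next dispatch after an idle timeout.
+ *   }), NS_DISPATCH_NORMAL);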
+ */
+class LazyIdleThread final : public nsISerialEventTarget, public nsIObserver {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET_FULL
+ NS_DECL_NSIOBSERVER
+
+ /**
+ * If AutomaticShutdown is specified, and the LazyIdleThread is created on the
+ * main thread, Shutdown() will automatically be called during
+ * xpcom-shutdown-threads.
+ */
+ enum ShutdownMethod { AutomaticShutdown = 0, ManualShutdown };
+
+ /**
+ * Create a new LazyIdleThread that will destroy its thread after the given
+ * number of milliseconds.
+ */
+ LazyIdleThread(uint32_t aIdleTimeoutMS, const char* aName,
+ ShutdownMethod aShutdownMethod = AutomaticShutdown);
+
+ /**
+ * Shuts down the LazyIdleThread, waiting for any pending work to complete.
+ * Must be called from mOwningEventTarget.
+ */
+ void Shutdown();
+
+ /**
+ * Register a nsIThreadPoolListener on the underlying threadpool to track the
+ * thread as it is created/destroyed.
+ */
+ nsresult SetListener(nsIThreadPoolListener* aListener);
+
+ private:
+ /**
+ * Asynchronously shuts down the LazyIdleThread on mOwningEventTarget.
+ */
+ ~LazyIdleThread();
+
+ /**
+ * The thread which created this LazyIdleThread and is responsible for
+ * shutting it down.
+ */
+ const nsCOMPtr<nsISerialEventTarget> mOwningEventTarget;
+
+  /**
+   * The single-threaded backing thread pool which provides the actual thread
+   * used by LazyIdleThread, and implements the idle timeout.
+   */
+ const RefPtr<nsThreadPool> mThreadPool;
+
+ /**
+ * The serial event target providing a `nsISerialEventTarget` implementation
+ * when on the LazyIdleThread.
+ */
+ const RefPtr<TaskQueue> mTaskQueue;
+
+  /**
+   * Only safe to access on the owning thread or in the destructor (as no other
+   * threads have access then). If `true`, the LazyIdleThread has already been
+   * shut down, so it does not need to be shut down asynchronously from the
+   * destructor.
+   */
+ bool mShutdown = false;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_lazyidlethread_h__
diff --git a/xpcom/threads/LeakRefPtr.h b/xpcom/threads/LeakRefPtr.h
new file mode 100644
index 0000000000..ee260dad7e
--- /dev/null
+++ b/xpcom/threads/LeakRefPtr.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Smart pointer which leaks its owned refcounted object by default. */
+
+#ifndef LeakRefPtr_h
+#define LeakRefPtr_h
+
+#include "mozilla/AlreadyAddRefed.h"
+
+namespace mozilla {
+
+/**
+ * An instance of this class behaves like a raw pointer which leaks the
+ * resource it owns unless explicitly released.
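+ *
+ * A minimal sketch of the intended pattern (illustrative; TakeFirstEvent and
+ * ShuttingDown are hypothetical):
+ *
+ *   LeakRefPtr<nsIRunnable> event(TakeFirstEvent());
+ *   if (ShuttingDown()) {
+ *     // Deliberately leak the reference rather than risk releasing the
+ *     // object at a bad time.
+ *     return;
+ *   }
+ *   event.release();  // normal path: drop our reference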
+ */
+template <class T>
+class LeakRefPtr {
+ public:
+ explicit LeakRefPtr(already_AddRefed<T>&& aPtr) : mRawPtr(aPtr.take()) {}
+
+ explicit operator bool() const { return !!mRawPtr; }
+
+ LeakRefPtr<T>& operator=(already_AddRefed<T>&& aPtr) {
+ mRawPtr = aPtr.take();
+ return *this;
+ }
+
+ T* get() const { return mRawPtr; }
+
+ already_AddRefed<T> take() {
+ T* rawPtr = mRawPtr;
+ mRawPtr = nullptr;
+ return already_AddRefed<T>(rawPtr);
+ }
+
+ void release() { NS_RELEASE(mRawPtr); }
+
+ private:
+ T* MOZ_OWNING_REF mRawPtr;
+};
+
+} // namespace mozilla
+
+#endif // LeakRefPtr_h
diff --git a/xpcom/threads/MainThreadIdlePeriod.cpp b/xpcom/threads/MainThreadIdlePeriod.cpp
new file mode 100644
index 0000000000..0a25647285
--- /dev/null
+++ b/xpcom/threads/MainThreadIdlePeriod.cpp
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MainThreadIdlePeriod.h"
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/StaticPrefs_idle_period.h"
+#include "mozilla/dom/Document.h"
+#include "VRManagerChild.h"
+#include "nsRefreshDriver.h"
+#include "nsThreadUtils.h"
+
+// The amount of idle time (milliseconds) reserved for a long idle period.
+static const double kLongIdlePeriodMS = 50.0;
+
+// The minimum amount of time (milliseconds) required for an idle period to be
+// scheduled on the main thread. N.B. layout.idle_period.time_limit adds
+// padding at the end of the idle period, so the point in time at which we
+// expect to become busy again is:
+//   now + idle_period.min + layout.idle_period.time_limit
+// or, during page load,
+//   now + idle_period.during_page_load.min + layout.idle_period.time_limit
+
+static const uint32_t kMaxTimerThreadBound = 5; // milliseconds
+
+namespace mozilla {
+
+NS_IMETHODIMP
+MainThreadIdlePeriod::GetIdlePeriodHint(TimeStamp* aIdleDeadline) {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aIdleDeadline);
+
+ TimeStamp now = TimeStamp::Now();
+ TimeStamp currentGuess =
+ now + TimeDuration::FromMilliseconds(kLongIdlePeriodMS);
+
+ currentGuess = nsRefreshDriver::GetIdleDeadlineHint(
+ currentGuess, nsRefreshDriver::IdleCheck::AllVsyncListeners);
+ if (XRE_IsContentProcess()) {
+ currentGuess = gfx::VRManagerChild::GetIdleDeadlineHint(currentGuess);
+ }
+ currentGuess = NS_GetTimerDeadlineHintOnCurrentThread(currentGuess,
+ kMaxTimerThreadBound);
+
+ // If the idle period is too small, then just return a null time
+ // to indicate we are busy. Otherwise return the actual deadline.
+ //
+  // If we're in high frequency rate mode, idle_period.min isn't used; the
+  // minimum is 1 ms instead.
+ TimeDuration minIdlePeriod = TimeDuration::FromMilliseconds(
+ nsRefreshDriver::IsInHighRateMode() ? 1 : StaticPrefs::idle_period_min());
+ bool busySoon = currentGuess.IsNull() ||
+ (now >= (currentGuess - minIdlePeriod)) ||
+ currentGuess < mLastIdleDeadline;
+
+ // During page load use higher minimum idle period.
+ if (!busySoon && XRE_IsContentProcess() &&
+ mozilla::dom::Document::HasRecentlyStartedForegroundLoads()) {
+ TimeDuration minIdlePeriod = TimeDuration::FromMilliseconds(
+ StaticPrefs::idle_period_during_page_load_min());
+ busySoon = (now >= (currentGuess - minIdlePeriod));
+ }
+
+ if (!busySoon) {
+ *aIdleDeadline = mLastIdleDeadline = currentGuess;
+ }
+
+ return NS_OK;
+}
+
+/* static */
+float MainThreadIdlePeriod::GetLongIdlePeriod() { return kLongIdlePeriodMS; }
+
+} // namespace mozilla
diff --git a/xpcom/threads/MainThreadIdlePeriod.h b/xpcom/threads/MainThreadIdlePeriod.h
new file mode 100644
index 0000000000..7e382f6771
--- /dev/null
+++ b/xpcom/threads/MainThreadIdlePeriod.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_mainthreadidleperiod_h
+#define mozilla_dom_mainthreadidleperiod_h
+
+#include "mozilla/TimeStamp.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+class MainThreadIdlePeriod final : public IdlePeriod {
+ public:
+ NS_DECL_NSIIDLEPERIOD
+
+ MainThreadIdlePeriod() : mLastIdleDeadline(TimeStamp::Now()) {}
+
+ static float GetLongIdlePeriod();
+
+ private:
+ virtual ~MainThreadIdlePeriod() = default;
+
+ TimeStamp mLastIdleDeadline;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_dom_mainthreadidleperiod_h
diff --git a/xpcom/threads/MainThreadUtils.h b/xpcom/threads/MainThreadUtils.h
new file mode 100644
index 0000000000..152805eb66
--- /dev/null
+++ b/xpcom/threads/MainThreadUtils.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MainThreadUtils_h_
+#define MainThreadUtils_h_
+
+#include "mozilla/Assertions.h"
+#include "mozilla/ThreadSafety.h"
+#include "nscore.h"
+
+class nsIThread;
+
+/**
+ * Get a reference to the main thread.
+ *
+ * @param aResult
+ * The resulting nsIThread object.
+ */
+extern nsresult NS_GetMainThread(nsIThread** aResult);
+
+#ifdef MOZILLA_INTERNAL_API
+bool NS_IsMainThreadTLSInitialized();
+extern "C" {
+bool NS_IsMainThread();
+}
+
+namespace mozilla {
+
+/**
+ * A dummy static capability for the thread safety analysis which can be
+ * required by functions and members using `MOZ_REQUIRES(sMainThreadCapability)`
+ * and `MOZ_GUARDED_BY(sMainThreadCapability)` and asserted using
+ * `AssertIsOnMainThread()`.
+ *
+ * If you want a thread-safety-analysis capability for a non-main thread,
+ * consider using the `EventTargetCapability` type.
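+ *
+ * For example (illustrative; Example and mCount are made up):
+ *
+ *   class Example {
+ *    public:
+ *     void Increment() {
+ *       AssertIsOnMainThread();  // asserts the capability to the analysis
+ *       mCount++;                // OK: mCount is guarded by the capability
+ *     }
+ *    private:
+ *     int mCount MOZ_GUARDED_BY(sMainThreadCapability) = 0;
+ *   };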
+ */
+class MOZ_CAPABILITY("main thread") MainThreadCapability final {};
+constexpr MainThreadCapability sMainThreadCapability;
+
+# ifdef DEBUG
+void AssertIsOnMainThread() MOZ_ASSERT_CAPABILITY(sMainThreadCapability);
+# else
+inline void AssertIsOnMainThread()
+ MOZ_ASSERT_CAPABILITY(sMainThreadCapability) {}
+# endif
+
+inline void ReleaseAssertIsOnMainThread()
+ MOZ_ASSERT_CAPABILITY(sMainThreadCapability) {
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
+}
+
+} // namespace mozilla
+
+#endif
+
+#endif // MainThreadUtils_h_
diff --git a/xpcom/threads/Monitor.h b/xpcom/threads/Monitor.h
new file mode 100644
index 0000000000..7bb91f9ad7
--- /dev/null
+++ b/xpcom/threads/Monitor.h
@@ -0,0 +1,316 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Monitor_h
+#define mozilla_Monitor_h
+
+#include "mozilla/CondVar.h"
+#include "mozilla/Mutex.h"
+
+namespace mozilla {
+
+/**
+ * Monitor provides a *non*-reentrant monitor: *not* a Java-style
+ * monitor. If your code needs support for reentrancy, use
+ * ReentrantMonitor instead. (Rarely should reentrancy be needed.)
+ *
+ * Instead of directly calling Monitor methods, it's safer and simpler
+ * to instead use the RAII wrappers MonitorAutoLock and
+ * MonitorAutoUnlock.
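+ *
+ * A minimal sketch of the usual wait/notify pattern (illustrative; mMonitor
+ * and mReady are made-up members):
+ *
+ *   void Consumer() {
+ *     MonitorAutoLock lock(mMonitor);
+ *     while (!mReady) {
+ *       lock.Wait();  // atomically unlocks, waits, and re-locks
+ *     }
+ *   }
+ *
+ *   void Producer() {
+ *     MonitorAutoLock lock(mMonitor);
+ *     mReady = true;
+ *     lock.Notify();
+ *   }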
+ */
+class MOZ_CAPABILITY("monitor") Monitor {
+ public:
+ explicit Monitor(const char* aName)
+ : mMutex(aName), mCondVar(mMutex, "[Monitor.mCondVar]") {}
+
+ ~Monitor() = default;
+
+ void Lock() MOZ_CAPABILITY_ACQUIRE() { mMutex.Lock(); }
+ [[nodiscard]] bool TryLock() MOZ_TRY_ACQUIRE(true) {
+ return mMutex.TryLock();
+ }
+ void Unlock() MOZ_CAPABILITY_RELEASE() { mMutex.Unlock(); }
+
+ void Wait() MOZ_REQUIRES(this) { mCondVar.Wait(); }
+ CVStatus Wait(TimeDuration aDuration) MOZ_REQUIRES(this) {
+ return mCondVar.Wait(aDuration);
+ }
+
+ void Notify() { mCondVar.Notify(); }
+ void NotifyAll() { mCondVar.NotifyAll(); }
+
+ void AssertCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY(this) {
+ mMutex.AssertCurrentThreadOwns();
+ }
+ void AssertNotCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY(!this) {
+ mMutex.AssertNotCurrentThreadOwns();
+ }
+
+ private:
+ Monitor() = delete;
+ Monitor(const Monitor&) = delete;
+ Monitor& operator=(const Monitor&) = delete;
+
+ Mutex mMutex;
+ CondVar mCondVar;
+};
+
+/**
+ * MonitorSingleWriter
+ *
+ * Monitor where a single writer exists, so that reads from the same thread
+ * will not generate data races or consistency issues.
+ *
+ * When possible, use MonitorAutoLock/MonitorAutoUnlock to lock/unlock this
+ * monitor within a scope, instead of calling Lock/Unlock directly.
+ *
+ * This requires an object implementing Mutex's SingleWriterLockOwner, so
+ * we can do correct-thread checks.
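+ *
+ * A minimal sketch (illustrative; Example is a made-up owner class):
+ *
+ *   class Example : public SingleWriterLockOwner {
+ *    public:
+ *     Example() : mMonitor("Example", this) {}
+ *     bool OnWritingThread() const override { return NS_IsMainThread(); }
+ *
+ *    private:
+ *     MonitorSingleWriter mMonitor;
+ *   };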
+ */
+
+class MonitorSingleWriter : public Monitor {
+ public:
+  // aOwner should typically be the object that contains the monitor. We
+  // will use that object (which must have a lifetime the same as or greater
+  // than this object's) to verify that we're running on the correct thread,
+  // typically only in DEBUG builds.
+ explicit MonitorSingleWriter(const char* aName, SingleWriterLockOwner* aOwner)
+ : Monitor(aName)
+#ifdef DEBUG
+ ,
+ mOwner(aOwner)
+#endif
+ {
+ MOZ_COUNT_CTOR(MonitorSingleWriter);
+ MOZ_ASSERT(mOwner);
+ }
+
+ MOZ_COUNTED_DTOR(MonitorSingleWriter)
+
+ void AssertOnWritingThread() const MOZ_ASSERT_CAPABILITY(this) {
+ MOZ_ASSERT(mOwner->OnWritingThread());
+ }
+ void AssertOnWritingThreadOrHeld() const MOZ_ASSERT_CAPABILITY(this) {
+#ifdef DEBUG
+ if (!mOwner->OnWritingThread()) {
+ AssertCurrentThreadOwns();
+ }
+#endif
+ }
+
+ private:
+#ifdef DEBUG
+ SingleWriterLockOwner* mOwner MOZ_UNSAFE_REF(
+ "This is normally the object that contains the MonitorSingleWriter, so "
+ "we don't want to hold a reference to ourselves");
+#endif
+
+ MonitorSingleWriter() = delete;
+ MonitorSingleWriter(const MonitorSingleWriter&) = delete;
+ MonitorSingleWriter& operator=(const MonitorSingleWriter&) = delete;
+};
+
+/**
+ * Lock the monitor for the lexical scope instances of this class are
+ * bound to (except for MonitorAutoUnlock in nested scopes).
+ *
+ * The monitor must be unlocked when instances of this class are
+ * created.
+ */
+namespace detail {
+template <typename T>
+class MOZ_SCOPED_CAPABILITY MOZ_STACK_CLASS BaseMonitorAutoLock {
+ public:
+ explicit BaseMonitorAutoLock(T& aMonitor) MOZ_CAPABILITY_ACQUIRE(aMonitor)
+ : mMonitor(&aMonitor) {
+ mMonitor->Lock();
+ }
+
+ ~BaseMonitorAutoLock() MOZ_CAPABILITY_RELEASE() { mMonitor->Unlock(); }
+ // It's very hard to mess up MonitorAutoLock lock(mMonitor); ... lock.Wait().
+ // The only way you can fail to hold the lock when you call lock.Wait() is to
+ // use MonitorAutoUnlock. For now we'll ignore that case.
+ void Wait() {
+ mMonitor->AssertCurrentThreadOwns();
+ mMonitor->Wait();
+ }
+ CVStatus Wait(TimeDuration aDuration) {
+ mMonitor->AssertCurrentThreadOwns();
+ return mMonitor->Wait(aDuration);
+ }
+
+ void Notify() { mMonitor->Notify(); }
+ void NotifyAll() { mMonitor->NotifyAll(); }
+
+ // Assert that aLock is the monitor passed to the constructor and that the
+ // current thread owns the monitor. In coding patterns such as:
+ //
+  // void LockedMethod(const BaseMonitorAutoLock<T>& aProofOfLock)
+ // {
+ // aProofOfLock.AssertOwns(mMonitor);
+ // ...
+ // }
+ //
+ // Without this assertion, it could be that mMonitor is not actually
+ // locked. It's possible to have code like:
+ //
+  //   BaseMonitorAutoLock lock(someMonitor);
+  //   ...
+  //   BaseMonitorAutoUnlock unlock(someMonitor);
+ // ...
+ // LockedMethod(lock);
+ //
+ // and in such a case, simply asserting that the monitor pointers match is not
+ // sufficient; monitor ownership must be asserted as well.
+ //
+ // Note that if you are going to use the coding pattern presented above, you
+ // should use this method in preference to using AssertCurrentThreadOwns on
+ // the mutex you expected to be held, since this method provides stronger
+ // guarantees.
+ void AssertOwns(const T& aMonitor) const MOZ_ASSERT_CAPABILITY(aMonitor) {
+ MOZ_ASSERT(&aMonitor == mMonitor);
+ mMonitor->AssertCurrentThreadOwns();
+ }
+
+ private:
+ BaseMonitorAutoLock() = delete;
+ BaseMonitorAutoLock(const BaseMonitorAutoLock&) = delete;
+ BaseMonitorAutoLock& operator=(const BaseMonitorAutoLock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ friend class MonitorAutoUnlock;
+
+ protected:
+ T* mMonitor;
+};
+} // namespace detail
+typedef detail::BaseMonitorAutoLock<Monitor> MonitorAutoLock;
+typedef detail::BaseMonitorAutoLock<MonitorSingleWriter>
+ MonitorSingleWriterAutoLock;
+
+// clang-format off
+// Use if we've done AssertOnWritingThread(), and then later need to take the
+// lock to write to a protected member. Instead of
+//    MonitorSingleWriterAutoLock lock(monitor)
+// use
+//    MonitorSingleWriterAutoLockOnThread(lock, monitor)
+// clang-format on
+#define MonitorSingleWriterAutoLockOnThread(lock, monitor) \
+ MOZ_PUSH_IGNORE_THREAD_SAFETY \
+ MonitorSingleWriterAutoLock lock(monitor); \
+ MOZ_POP_THREAD_SAFETY
+
+/**
+ * Unlock the monitor for the lexical scope instances of this class
+ * are bound to (except for MonitorAutoLock in nested scopes).
+ *
+ * The monitor must be locked by the current thread when instances of
+ * this class are created.
+ */
+namespace detail {
+template <typename T>
+class MOZ_STACK_CLASS MOZ_SCOPED_CAPABILITY BaseMonitorAutoUnlock {
+ public:
+ explicit BaseMonitorAutoUnlock(T& aMonitor)
+ MOZ_SCOPED_UNLOCK_RELEASE(aMonitor)
+ : mMonitor(&aMonitor) {
+ mMonitor->Unlock();
+ }
+
+ ~BaseMonitorAutoUnlock() MOZ_SCOPED_UNLOCK_REACQUIRE() { mMonitor->Lock(); }
+
+ private:
+ BaseMonitorAutoUnlock() = delete;
+ BaseMonitorAutoUnlock(const BaseMonitorAutoUnlock&) = delete;
+ BaseMonitorAutoUnlock& operator=(const BaseMonitorAutoUnlock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ T* mMonitor;
+};
+} // namespace detail
+typedef detail::BaseMonitorAutoUnlock<Monitor> MonitorAutoUnlock;
+typedef detail::BaseMonitorAutoUnlock<MonitorSingleWriter>
+ MonitorSingleWriterAutoUnlock;
+
+/**
+ * Like MonitorAutoLock, but the lock may also be dropped prematurely via
+ * Unlock() and re-taken via Lock() within the scope.
+ *
+ * The monitor must be unlocked when instances of this class are
+ * created.
+ */
+class MOZ_SCOPED_CAPABILITY MOZ_STACK_CLASS ReleasableMonitorAutoLock {
+ public:
+ explicit ReleasableMonitorAutoLock(Monitor& aMonitor)
+ MOZ_CAPABILITY_ACQUIRE(aMonitor)
+ : mMonitor(&aMonitor) {
+ mMonitor->Lock();
+ mLocked = true;
+ }
+
+ ~ReleasableMonitorAutoLock() MOZ_CAPABILITY_RELEASE() {
+ if (mLocked) {
+ mMonitor->Unlock();
+ }
+ }
+
+ // See BaseMonitorAutoLock::Wait
+ void Wait() {
+ mMonitor->AssertCurrentThreadOwns(); // someone could have called Unlock()
+ mMonitor->Wait();
+ }
+ CVStatus Wait(TimeDuration aDuration) {
+ mMonitor->AssertCurrentThreadOwns();
+ return mMonitor->Wait(aDuration);
+ }
+
+ void Notify() {
+ MOZ_ASSERT(mLocked);
+ mMonitor->Notify();
+ }
+ void NotifyAll() {
+ MOZ_ASSERT(mLocked);
+ mMonitor->NotifyAll();
+ }
+
+ // Allow dropping the lock prematurely; for example to support something like:
+ // clang-format off
+ // MonitorAutoLock lock(mMonitor);
+ // ...
+ // if (foo) {
+ // lock.Unlock();
+ // MethodThatCantBeCalledWithLock()
+ // return;
+ // }
+ // clang-format on
+ void Unlock() MOZ_CAPABILITY_RELEASE() {
+ MOZ_ASSERT(mLocked);
+ mMonitor->Unlock();
+ mLocked = false;
+ }
+ void Lock() MOZ_CAPABILITY_ACQUIRE() {
+ MOZ_ASSERT(!mLocked);
+ mMonitor->Lock();
+ mLocked = true;
+ }
+ void AssertCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY() {
+ mMonitor->AssertCurrentThreadOwns();
+ }
+
+ private:
+ bool mLocked;
+ Monitor* mMonitor;
+
+ ReleasableMonitorAutoLock() = delete;
+ ReleasableMonitorAutoLock(const ReleasableMonitorAutoLock&) = delete;
+ ReleasableMonitorAutoLock& operator=(const ReleasableMonitorAutoLock&) =
+ delete;
+ static void* operator new(size_t) noexcept(true);
+};
+
+} // namespace mozilla
+
+#endif // mozilla_Monitor_h
diff --git a/xpcom/threads/MozPromise.h b/xpcom/threads/MozPromise.h
new file mode 100644
index 0000000000..c0b0addf0a
--- /dev/null
+++ b/xpcom/threads/MozPromise.h
@@ -0,0 +1,1763 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MozPromise_h_)
+# define MozPromise_h_
+
+# include <type_traits>
+# include <utility>
+
+# include "mozilla/ErrorNames.h"
+# include "mozilla/Logging.h"
+# include "mozilla/Maybe.h"
+# include "mozilla/Monitor.h"
+# include "mozilla/Mutex.h"
+# include "mozilla/RefPtr.h"
+# include "mozilla/UniquePtr.h"
+# include "mozilla/Variant.h"
+# include "nsIDirectTaskDispatcher.h"
+# include "nsISerialEventTarget.h"
+# include "nsTArray.h"
+# include "nsThreadUtils.h"
+
+# ifdef MOZ_WIDGET_ANDROID
+# include "mozilla/jni/GeckoResultUtils.h"
+# endif
+
+# if MOZ_DIAGNOSTIC_ASSERT_ENABLED
+# define PROMISE_DEBUG
+# endif
+
+# ifdef PROMISE_DEBUG
+# define PROMISE_ASSERT MOZ_RELEASE_ASSERT
+# else
+# define PROMISE_ASSERT(...) \
+ do { \
+ } while (0)
+# endif
+
+# if DEBUG
+# include "nsPrintfCString.h"
+# endif
+
+namespace mozilla {
+
+namespace dom {
+class Promise;
+}
+
+extern LazyLogModule gMozPromiseLog;
+
+# define PROMISE_LOG(x, ...) \
+ MOZ_LOG(gMozPromiseLog, mozilla::LogLevel::Debug, (x, ##__VA_ARGS__))
+
+namespace detail {
+template <typename F>
+struct MethodTraitsHelper : MethodTraitsHelper<decltype(&F::operator())> {};
+template <typename ThisType, typename Ret, typename... ArgTypes>
+struct MethodTraitsHelper<Ret (ThisType::*)(ArgTypes...)> {
+ using ReturnType = Ret;
+ static const size_t ArgSize = sizeof...(ArgTypes);
+};
+template <typename ThisType, typename Ret, typename... ArgTypes>
+struct MethodTraitsHelper<Ret (ThisType::*)(ArgTypes...) const> {
+ using ReturnType = Ret;
+ static const size_t ArgSize = sizeof...(ArgTypes);
+};
+template <typename ThisType, typename Ret, typename... ArgTypes>
+struct MethodTraitsHelper<Ret (ThisType::*)(ArgTypes...) volatile> {
+ using ReturnType = Ret;
+ static const size_t ArgSize = sizeof...(ArgTypes);
+};
+template <typename ThisType, typename Ret, typename... ArgTypes>
+struct MethodTraitsHelper<Ret (ThisType::*)(ArgTypes...) const volatile> {
+ using ReturnType = Ret;
+ static const size_t ArgSize = sizeof...(ArgTypes);
+};
+template <typename T>
+struct MethodTrait : MethodTraitsHelper<std::remove_reference_t<T>> {};
+
+} // namespace detail
+
+template <typename MethodType>
+using TakesArgument =
+ std::integral_constant<bool, detail::MethodTrait<MethodType>::ArgSize != 0>;
+
+template <typename MethodType, typename TargetType>
+using ReturnTypeIs =
+ std::is_convertible<typename detail::MethodTrait<MethodType>::ReturnType,
+ TargetType>;
+
+template <typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+class MozPromise;
+
+template <typename Return>
+struct IsMozPromise : std::false_type {};
+
+template <typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+struct IsMozPromise<MozPromise<ResolveValueT, RejectValueT, IsExclusive>>
+ : std::true_type {};
+
+/*
+ * A promise manages an asynchronous request that may or may not be able to be
+ * fulfilled immediately. When an API returns a promise, the consumer may attach
+ * callbacks to be invoked (asynchronously, on a specified thread) when the
+ * request is either completed (resolved) or cannot be completed (rejected).
+ * Whereas JS promise callbacks are dispatched from microtask checkpoints,
+ * MozPromise resolution/rejection makes a normal round-trip through the event
+ * loop, which simplifies their ordering semantics relative to other native
+ * code.
+ *
+ * MozPromises attempt to mirror the spirit of JS Promises to the extent that
+ * is possible (and desirable) in C++. While the intent is that MozPromises
+ * feel familiar to programmers who are accustomed to their JS-implemented
+ * cousin, we don't shy away from imposing restrictions and adding features that
+ * make sense for the use cases we encounter.
+ *
+ * A MozPromise is ThreadSafe, and may be ->Then()ed on any thread. The Then()
+ * call accepts resolve and reject callbacks, and returns a magic object which
+ * will be implicitly converted to a MozPromise::Request or a MozPromise object
+ * depending on how the return value is used. The magic object serves several
+ * purposes for the consumer.
+ *
+ * (1) When converting to a MozPromise::Request, it allows the caller to
+ * cancel the delivery of the resolve/reject value if it has not already
+ * occurred, via Disconnect() (this must be done on the target thread to
+ * avoid racing).
+ *
+ * (2) When converting to a MozPromise (which is called a completion promise),
+ * it allows promise chaining so ->Then() can be called again to attach
+ * more resolve and reject callbacks. If the resolve/reject callback
+ * returns a new MozPromise, that promise is chained to the completion
+ * promise, such that its resolve/reject value will be forwarded along
+ * when it arrives. If the resolve/reject callback returns void, the
+ * completion promise is resolved/rejected with the same value that was
+ * passed to the callback.
+ *
+ * The MozPromise APIs skirt traditional XPCOM convention by returning RefPtrs
+ * (rather than already_AddRefed) from various methods. This is done to allow
+ * elegant chaining of calls without cluttering up the code with intermediate
+ * variables, and without introducing separate API variants for callers that
+ * want a return value (from, say, ->Then()) from those that don't.
+ *
+ * When IsExclusive is true, the MozPromise does a release-mode assertion that
+ * there is at most one call to either Then(...) or ChainTo(...).
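+ *
+ * A minimal usage sketch (illustrative; ExamplePromise, HandleValue and
+ * HandleError are made up):
+ *
+ *   using ExamplePromise = MozPromise<int, nsresult, true>;
+ *
+ *   RefPtr<ExamplePromise> StartWork() {
+ *     return ExamplePromise::CreateAndResolve(42, __func__);
+ *   }
+ *
+ *   void Consumer(nsISerialEventTarget* aTarget) {
+ *     StartWork()->Then(
+ *         aTarget, __func__,
+ *         [](int aValue) { HandleValue(aValue); },  // resolve callback
+ *         [](nsresult aRv) { HandleError(aRv); });  // reject callback
+ *   }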
+ */
+
+class MozPromiseRefcountable {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MozPromiseRefcountable)
+ protected:
+ virtual ~MozPromiseRefcountable() = default;
+};
+
+class MozPromiseBase : public MozPromiseRefcountable {
+ public:
+ virtual void AssertIsDead() = 0;
+};
+
+template <typename T>
+class MozPromiseHolder;
+template <typename T>
+class MozPromiseRequestHolder;
+template <typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+class MozPromise : public MozPromiseBase {
+ static const uint32_t sMagic = 0xcecace11;
+
+ // Return a |T&&| to enable move when IsExclusive is true or
+ // a |const T&| to enforce copy otherwise.
+ template <typename T,
+ typename R = std::conditional_t<IsExclusive, T&&, const T&>>
+ static R MaybeMove(T& aX) {
+ return static_cast<R>(aX);
+ }
+
+ public:
+ typedef ResolveValueT ResolveValueType;
+ typedef RejectValueT RejectValueType;
+ class ResolveOrRejectValue {
+ public:
+ template <typename ResolveValueType_>
+ void SetResolve(ResolveValueType_&& aResolveValue) {
+ MOZ_ASSERT(IsNothing());
+ mValue = Storage(VariantIndex<ResolveIndex>{},
+ std::forward<ResolveValueType_>(aResolveValue));
+ }
+
+ template <typename RejectValueType_>
+ void SetReject(RejectValueType_&& aRejectValue) {
+ MOZ_ASSERT(IsNothing());
+ mValue = Storage(VariantIndex<RejectIndex>{},
+ std::forward<RejectValueType_>(aRejectValue));
+ }
+
+ template <typename ResolveValueType_>
+ static ResolveOrRejectValue MakeResolve(ResolveValueType_&& aResolveValue) {
+ ResolveOrRejectValue val;
+ val.SetResolve(std::forward<ResolveValueType_>(aResolveValue));
+ return val;
+ }
+
+ template <typename RejectValueType_>
+ static ResolveOrRejectValue MakeReject(RejectValueType_&& aRejectValue) {
+ ResolveOrRejectValue val;
+ val.SetReject(std::forward<RejectValueType_>(aRejectValue));
+ return val;
+ }
+
+ bool IsResolve() const { return mValue.template is<ResolveIndex>(); }
+ bool IsReject() const { return mValue.template is<RejectIndex>(); }
+ bool IsNothing() const { return mValue.template is<NothingIndex>(); }
+
+ const ResolveValueType& ResolveValue() const {
+ return mValue.template as<ResolveIndex>();
+ }
+ ResolveValueType& ResolveValue() {
+ return mValue.template as<ResolveIndex>();
+ }
+ const RejectValueType& RejectValue() const {
+ return mValue.template as<RejectIndex>();
+ }
+ RejectValueType& RejectValue() { return mValue.template as<RejectIndex>(); }
+
+ private:
+ enum { NothingIndex, ResolveIndex, RejectIndex };
+ using Storage = Variant<Nothing, ResolveValueType, RejectValueType>;
+ Storage mValue = Storage(VariantIndex<NothingIndex>{});
+ };
+
+ protected:
+ // MozPromise is the public type, and never constructed directly. Construct
+ // a MozPromise::Private, defined below.
+ MozPromise(const char* aCreationSite, bool aIsCompletionPromise)
+ : mCreationSite(aCreationSite),
+ mMutex("MozPromise Mutex"),
+ mHaveRequest(false),
+ mIsCompletionPromise(aIsCompletionPromise)
+# ifdef PROMISE_DEBUG
+ ,
+ mMagic4(&mMutex)
+# endif
+ {
+ PROMISE_LOG("%s creating MozPromise (%p)", mCreationSite, this);
+ }
+
+ public:
+ // MozPromise::Private allows us to separate the public interface (upon which
+ // consumers of the promise may invoke methods like Then()) from the private
+ // interface (upon which the creator of the promise may invoke Resolve() or
+ // Reject()). APIs should create and store a MozPromise::Private (usually
+ // via a MozPromiseHolder), and return a MozPromise to consumers.
+ //
+ // NB: We can include the definition of this class inline once B2G ICS is
+ // gone.
+ class Private;
+
+ template <typename ResolveValueType_>
+ [[nodiscard]] static RefPtr<MozPromise> CreateAndResolve(
+ ResolveValueType_&& aResolveValue, const char* aResolveSite) {
+ static_assert(std::is_convertible_v<ResolveValueType_, ResolveValueT>,
+ "Resolve() argument must be implicitly convertible to "
+ "MozPromise's ResolveValueT");
+ RefPtr<typename MozPromise::Private> p =
+ new MozPromise::Private(aResolveSite);
+ p->Resolve(std::forward<ResolveValueType_>(aResolveValue), aResolveSite);
+ return p;
+ }
+
+ template <typename RejectValueType_>
+ [[nodiscard]] static RefPtr<MozPromise> CreateAndReject(
+ RejectValueType_&& aRejectValue, const char* aRejectSite) {
+ static_assert(std::is_convertible_v<RejectValueType_, RejectValueT>,
+ "Reject() argument must be implicitly convertible to "
+ "MozPromise's RejectValueT");
+ RefPtr<typename MozPromise::Private> p =
+ new MozPromise::Private(aRejectSite);
+ p->Reject(std::forward<RejectValueType_>(aRejectValue), aRejectSite);
+ return p;
+ }
+
+ template <typename ResolveOrRejectValueType_>
+ [[nodiscard]] static RefPtr<MozPromise> CreateAndResolveOrReject(
+ ResolveOrRejectValueType_&& aValue, const char* aSite) {
+ RefPtr<typename MozPromise::Private> p = new MozPromise::Private(aSite);
+ p->ResolveOrReject(std::forward<ResolveOrRejectValueType_>(aValue), aSite);
+ return p;
+ }
+
+ typedef MozPromise<CopyableTArray<ResolveValueType>, RejectValueType,
+ IsExclusive>
+ AllPromiseType;
+
+ typedef MozPromise<CopyableTArray<ResolveOrRejectValue>, bool, IsExclusive>
+ AllSettledPromiseType;
+
+ private:
+ class AllPromiseHolder : public MozPromiseRefcountable {
+ public:
+ explicit AllPromiseHolder(size_t aDependentPromises)
+ : mPromise(new typename AllPromiseType::Private(__func__)),
+ mOutstandingPromises(aDependentPromises) {
+ MOZ_ASSERT(aDependentPromises > 0);
+ mResolveValues.SetLength(aDependentPromises);
+ }
+
+ template <typename ResolveValueType_>
+ void Resolve(size_t aIndex, ResolveValueType_&& aResolveValue) {
+ if (!mPromise) {
+ // Already rejected.
+ return;
+ }
+
+ mResolveValues[aIndex].emplace(
+ std::forward<ResolveValueType_>(aResolveValue));
+ if (--mOutstandingPromises == 0) {
+ nsTArray<ResolveValueType> resolveValues;
+ resolveValues.SetCapacity(mResolveValues.Length());
+ for (auto&& resolveValue : mResolveValues) {
+ resolveValues.AppendElement(std::move(resolveValue.ref()));
+ }
+
+ mPromise->Resolve(std::move(resolveValues), __func__);
+ mPromise = nullptr;
+ mResolveValues.Clear();
+ }
+ }
+
+ template <typename RejectValueType_>
+ void Reject(RejectValueType_&& aRejectValue) {
+ if (!mPromise) {
+ // Already rejected.
+ return;
+ }
+
+ mPromise->Reject(std::forward<RejectValueType_>(aRejectValue), __func__);
+ mPromise = nullptr;
+ mResolveValues.Clear();
+ }
+
+ AllPromiseType* Promise() { return mPromise; }
+
+ private:
+ nsTArray<Maybe<ResolveValueType>> mResolveValues;
+ RefPtr<typename AllPromiseType::Private> mPromise;
+ size_t mOutstandingPromises;
+ };
+
+ // Trying to pass ResolveOrRejectValue by value fails static analysis checks,
+ // so we need to use either a const& or an rvalue reference, depending on
+ // whether IsExclusive is true or not.
+ typedef std::conditional_t<IsExclusive, ResolveOrRejectValue&&,
+ const ResolveOrRejectValue&>
+ ResolveOrRejectValueParam;
+
+ typedef std::conditional_t<IsExclusive, ResolveValueType&&,
+ const ResolveValueType&>
+ ResolveValueTypeParam;
+
+ typedef std::conditional_t<IsExclusive, RejectValueType&&,
+ const RejectValueType&>
+ RejectValueTypeParam;
+
+ class AllSettledPromiseHolder : public MozPromiseRefcountable {
+ public:
+ explicit AllSettledPromiseHolder(size_t aDependentPromises)
+ : mPromise(new typename AllSettledPromiseType::Private(__func__)),
+ mOutstandingPromises(aDependentPromises) {
+ MOZ_ASSERT(aDependentPromises > 0);
+ mValues.SetLength(aDependentPromises);
+ }
+
+ void Settle(size_t aIndex, ResolveOrRejectValueParam aValue) {
+ if (!mPromise) {
+        // Already settled.
+ return;
+ }
+
+ mValues[aIndex].emplace(MaybeMove(aValue));
+ if (--mOutstandingPromises == 0) {
+ nsTArray<ResolveOrRejectValue> values;
+ values.SetCapacity(mValues.Length());
+ for (auto&& value : mValues) {
+ values.AppendElement(std::move(value.ref()));
+ }
+
+ mPromise->Resolve(std::move(values), __func__);
+ mPromise = nullptr;
+ mValues.Clear();
+ }
+ }
+
+ AllSettledPromiseType* Promise() { return mPromise; }
+
+ private:
+ nsTArray<Maybe<ResolveOrRejectValue>> mValues;
+ RefPtr<typename AllSettledPromiseType::Private> mPromise;
+ size_t mOutstandingPromises;
+ };
+
+ public:
+ [[nodiscard]] static RefPtr<AllPromiseType> All(
+ nsISerialEventTarget* aProcessingTarget,
+ nsTArray<RefPtr<MozPromise>>& aPromises) {
+ if (aPromises.Length() == 0) {
+ return AllPromiseType::CreateAndResolve(
+ CopyableTArray<ResolveValueType>(), __func__);
+ }
+
+ RefPtr<AllPromiseHolder> holder = new AllPromiseHolder(aPromises.Length());
+ RefPtr<AllPromiseType> promise = holder->Promise();
+ for (size_t i = 0; i < aPromises.Length(); ++i) {
+ aPromises[i]->Then(
+ aProcessingTarget, __func__,
+ [holder, i](ResolveValueTypeParam aResolveValue) -> void {
+ holder->Resolve(i, MaybeMove(aResolveValue));
+ },
+ [holder](RejectValueTypeParam aRejectValue) -> void {
+ holder->Reject(MaybeMove(aRejectValue));
+ });
+ }
+ return promise;
+ }
+
+ [[nodiscard]] static RefPtr<AllSettledPromiseType> AllSettled(
+ nsISerialEventTarget* aProcessingTarget,
+ nsTArray<RefPtr<MozPromise>>& aPromises) {
+ if (aPromises.Length() == 0) {
+ return AllSettledPromiseType::CreateAndResolve(
+ CopyableTArray<ResolveOrRejectValue>(), __func__);
+ }
+
+ RefPtr<AllSettledPromiseHolder> holder =
+ new AllSettledPromiseHolder(aPromises.Length());
+ RefPtr<AllSettledPromiseType> promise = holder->Promise();
+ for (size_t i = 0; i < aPromises.Length(); ++i) {
+ aPromises[i]->Then(aProcessingTarget, __func__,
+ [holder, i](ResolveOrRejectValueParam aValue) -> void {
+ holder->Settle(i, MaybeMove(aValue));
+ });
+ }
+ return promise;
+ }
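+
+  // For example (a sketch; `promises` is an nsTArray<RefPtr<GenericPromise>>
+  // built by the caller):
+  //
+  //   GenericPromise::All(GetCurrentSerialEventTarget(), promises)
+  //       ->Then(GetCurrentSerialEventTarget(), __func__,
+  //              [](const CopyableTArray<bool>& aValues) { /* all resolved */ },
+  //              [](nsresult aRv) { /* first rejection */ });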
+
+ class Request : public MozPromiseRefcountable {
+ public:
+ virtual void Disconnect() = 0;
+
+ protected:
+ Request() : mComplete(false), mDisconnected(false) {}
+ virtual ~Request() = default;
+
+ bool mComplete;
+ bool mDisconnected;
+ };
+
+ protected:
+ /*
+ * A ThenValue tracks a single consumer waiting on the promise. When a
+ * consumer invokes promise->Then(...), a ThenValue is created. Once the
+ * Promise is resolved or rejected, a {Resolve,Reject}Runnable is dispatched,
+ * which invokes the resolve/reject method and then deletes the ThenValue.
+ */
+ class ThenValueBase : public Request {
+ friend class MozPromise;
+ static const uint32_t sMagic = 0xfadece11;
+
+ public:
+ class ResolveOrRejectRunnable final
+ : public PrioritizableCancelableRunnable {
+ public:
+ ResolveOrRejectRunnable(ThenValueBase* aThenValue, MozPromise* aPromise)
+ : PrioritizableCancelableRunnable(
+ aPromise->mPriority,
+ "MozPromise::ThenValueBase::ResolveOrRejectRunnable"),
+ mThenValue(aThenValue),
+ mPromise(aPromise) {
+ MOZ_DIAGNOSTIC_ASSERT(!mPromise->IsPending());
+ }
+
+ ~ResolveOrRejectRunnable() {
+ if (mThenValue) {
+ mThenValue->AssertIsDead();
+ }
+ }
+
+ NS_IMETHOD Run() override {
+ PROMISE_LOG("ResolveOrRejectRunnable::Run() [this=%p]", this);
+ mThenValue->DoResolveOrReject(mPromise->Value());
+ mThenValue = nullptr;
+ mPromise = nullptr;
+ return NS_OK;
+ }
+
+ nsresult Cancel() override { return Run(); }
+
+ private:
+ RefPtr<ThenValueBase> mThenValue;
+ RefPtr<MozPromise> mPromise;
+ };
+
+ ThenValueBase(nsISerialEventTarget* aResponseTarget, const char* aCallSite)
+ : mResponseTarget(aResponseTarget), mCallSite(aCallSite) {
+ MOZ_ASSERT(aResponseTarget);
+ }
+
+# ifdef PROMISE_DEBUG
+ ~ThenValueBase() {
+ mMagic1 = 0;
+ mMagic2 = 0;
+ }
+# endif
+
+ void AssertIsDead() {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic);
+      // We want to assert that this ThenValue is dead - that is to say, that
+ // there are no consumers waiting for the result. In the case of a normal
+ // ThenValue, we check that it has been disconnected, which is the way
+ // that the consumer signals that it no longer wishes to hear about the
+ // result. If this ThenValue has a completion promise (which is mutually
+ // exclusive with being disconnectable), we recursively assert that every
+ // ThenValue associated with the completion promise is dead.
+ if (MozPromiseBase* p = CompletionPromise()) {
+ p->AssertIsDead();
+ } else {
+# ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ if (MOZ_UNLIKELY(!Request::mDisconnected)) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "MozPromise::ThenValue created from '%s' destroyed without being "
+ "either disconnected, resolved, or rejected (dispatchRv: %s)",
+ mCallSite,
+ mDispatchRv ? GetStaticErrorName(*mDispatchRv)
+ : "not dispatched");
+ }
+# endif
+ }
+ }
+
+ void Dispatch(MozPromise* aPromise) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic);
+ aPromise->mMutex.AssertCurrentThreadOwns();
+ MOZ_ASSERT(!aPromise->IsPending());
+
+ nsCOMPtr<nsIRunnable> r = new ResolveOrRejectRunnable(this, aPromise);
+ PROMISE_LOG(
+ "%s Then() call made from %s [Runnable=%p, Promise=%p, ThenValue=%p] "
+ "%s dispatch",
+ aPromise->mValue.IsResolve() ? "Resolving" : "Rejecting", mCallSite,
+ r.get(), aPromise, this,
+ aPromise->mUseSynchronousTaskDispatch ? "synchronous"
+ : aPromise->mUseDirectTaskDispatch ? "directtask"
+ : "normal");
+
+ if (aPromise->mUseSynchronousTaskDispatch &&
+ mResponseTarget->IsOnCurrentThread()) {
+ PROMISE_LOG("ThenValue::Dispatch running task synchronously [this=%p]",
+ this);
+ r->Run();
+ return;
+ }
+
+ if (aPromise->mUseDirectTaskDispatch &&
+ mResponseTarget->IsOnCurrentThread()) {
+ PROMISE_LOG(
+ "ThenValue::Dispatch dispatch task via direct task queue [this=%p]",
+ this);
+ nsCOMPtr<nsIDirectTaskDispatcher> dispatcher =
+ do_QueryInterface(mResponseTarget);
+ if (dispatcher) {
+ SetDispatchRv(dispatcher->DispatchDirectTask(r.forget()));
+ return;
+ }
+ NS_WARNING(
+ nsPrintfCString(
+ "Direct Task dispatching not available for thread \"%s\"",
+ PR_GetThreadName(PR_GetCurrentThread()))
+ .get());
+ MOZ_DIAGNOSTIC_ASSERT(
+ false,
+ "mResponseTarget must implement nsIDirectTaskDispatcher for direct "
+ "task dispatching");
+ }
+
+ // Promise consumers are allowed to disconnect the Request object and
+ // then shut down the thread or task queue that the promise result would
+ // be dispatched on. So we unfortunately can't assert that promise
+ // dispatch succeeds. :-(
+ // We do record whether or not it succeeds so that if the ThenValueBase is
+ // then destroyed and it was not disconnected, we can include that
+ // information in the assertion message.
+ SetDispatchRv(mResponseTarget->Dispatch(r.forget()));
+ }
+
+ void Disconnect() override {
+ MOZ_DIAGNOSTIC_ASSERT(mResponseTarget->IsOnCurrentThread());
+ MOZ_DIAGNOSTIC_ASSERT(!Request::mComplete);
+ Request::mDisconnected = true;
+
+ // We could support rejecting the completion promise on disconnection, but
+ // then we'd need to have some sort of default reject value. The use cases
+ // of disconnection and completion promise chaining seem pretty
+      // orthogonal, so let's just assert against it.
+ MOZ_DIAGNOSTIC_ASSERT(!CompletionPromise());
+ }
+
+ protected:
+ virtual MozPromiseBase* CompletionPromise() const = 0;
+ virtual void DoResolveOrRejectInternal(ResolveOrRejectValue& aValue) = 0;
+
+ void DoResolveOrReject(ResolveOrRejectValue& aValue) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic);
+ MOZ_DIAGNOSTIC_ASSERT(mResponseTarget->IsOnCurrentThread());
+ Request::mComplete = true;
+ if (Request::mDisconnected) {
+ PROMISE_LOG(
+ "ThenValue::DoResolveOrReject disconnected - bailing out [this=%p]",
+ this);
+ return;
+ }
+
+ // Invoke the resolve or reject method.
+ DoResolveOrRejectInternal(aValue);
+ }
+
+ void SetDispatchRv(nsresult aRv) {
+# ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ mDispatchRv = Some(aRv);
+# endif
+ }
+
+ nsCOMPtr<nsISerialEventTarget>
+ mResponseTarget; // May be released on any thread.
+# ifdef PROMISE_DEBUG
+ uint32_t mMagic1 = sMagic;
+# endif
+ const char* mCallSite;
+# ifdef PROMISE_DEBUG
+ uint32_t mMagic2 = sMagic;
+# endif
+# ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ Maybe<nsresult> mDispatchRv;
+# endif
+ };
+
+ /*
+   * We create two overloads for invoking resolve/reject methods so as to
+ * make the resolve/reject value argument "optional".
+ */
+ template <typename ThisType, typename MethodType, typename ValueType>
+ static std::enable_if_t<TakesArgument<MethodType>::value,
+ typename detail::MethodTrait<MethodType>::ReturnType>
+ InvokeMethod(ThisType* aThisVal, MethodType aMethod, ValueType&& aValue) {
+ return (aThisVal->*aMethod)(std::forward<ValueType>(aValue));
+ }
+
+ template <typename ThisType, typename MethodType, typename ValueType>
+ static std::enable_if_t<!TakesArgument<MethodType>::value,
+ typename detail::MethodTrait<MethodType>::ReturnType>
+ InvokeMethod(ThisType* aThisVal, MethodType aMethod, ValueType&& aValue) {
+ return (aThisVal->*aMethod)();
+ }
+
+ // Called when promise chaining is supported.
+ template <bool SupportChaining, typename ThisType, typename MethodType,
+ typename ValueType, typename CompletionPromiseType>
+ static std::enable_if_t<SupportChaining, void> InvokeCallbackMethod(
+ ThisType* aThisVal, MethodType aMethod, ValueType&& aValue,
+ CompletionPromiseType&& aCompletionPromise) {
+ auto p = InvokeMethod(aThisVal, aMethod, std::forward<ValueType>(aValue));
+ if (aCompletionPromise) {
+ p->ChainTo(aCompletionPromise.forget(), "<chained completion promise>");
+ }
+ }
+
+ // Called when promise chaining is not supported.
+ template <bool SupportChaining, typename ThisType, typename MethodType,
+ typename ValueType, typename CompletionPromiseType>
+ static std::enable_if_t<!SupportChaining, void> InvokeCallbackMethod(
+ ThisType* aThisVal, MethodType aMethod, ValueType&& aValue,
+ CompletionPromiseType&& aCompletionPromise) {
+ MOZ_DIAGNOSTIC_ASSERT(
+ !aCompletionPromise,
+ "Can't do promise chaining for a non-promise-returning method.");
+ InvokeMethod(aThisVal, aMethod, std::forward<ValueType>(aValue));
+ }
+
+ template <typename>
+ class ThenCommand;
+
+ template <typename...>
+ class ThenValue;
+
+ template <typename ThisType, typename ResolveMethodType,
+ typename RejectMethodType>
+ class ThenValue<ThisType*, ResolveMethodType, RejectMethodType>
+ : public ThenValueBase {
+ friend class ThenCommand<ThenValue>;
+
+ using R1 = typename RemoveSmartPointer<
+ typename detail::MethodTrait<ResolveMethodType>::ReturnType>::Type;
+ using R2 = typename RemoveSmartPointer<
+ typename detail::MethodTrait<RejectMethodType>::ReturnType>::Type;
+ using SupportChaining =
+ std::integral_constant<bool, IsMozPromise<R1>::value &&
+ std::is_same_v<R1, R2>>;
+
+ // Fall back to MozPromise when promise chaining is not supported to make
+ // code compile.
+ using PromiseType =
+ std::conditional_t<SupportChaining::value, R1, MozPromise>;
+
+ public:
+ ThenValue(nsISerialEventTarget* aResponseTarget, ThisType* aThisVal,
+ ResolveMethodType aResolveMethod, RejectMethodType aRejectMethod,
+ const char* aCallSite)
+ : ThenValueBase(aResponseTarget, aCallSite),
+ mThisVal(aThisVal),
+ mResolveMethod(aResolveMethod),
+ mRejectMethod(aRejectMethod) {}
+
+ void Disconnect() override {
+ ThenValueBase::Disconnect();
+
+ // If a Request has been disconnected, we don't guarantee that the
+ // resolve/reject runnable will be dispatched. Null out our refcounted
+ // this-value now so that it's released predictably on the dispatch
+ // thread.
+ mThisVal = nullptr;
+ }
+
+ protected:
+ MozPromiseBase* CompletionPromise() const override {
+ return mCompletionPromise;
+ }
+
+ void DoResolveOrRejectInternal(ResolveOrRejectValue& aValue) override {
+ if (aValue.IsResolve()) {
+ InvokeCallbackMethod<SupportChaining::value>(
+ mThisVal.get(), mResolveMethod, MaybeMove(aValue.ResolveValue()),
+ std::move(mCompletionPromise));
+ } else {
+ InvokeCallbackMethod<SupportChaining::value>(
+ mThisVal.get(), mRejectMethod, MaybeMove(aValue.RejectValue()),
+ std::move(mCompletionPromise));
+ }
+
+ // Null out mThisVal after invoking the callback so that any references
+ // are released predictably on the dispatch thread. Otherwise, it would be
+ // released on whatever thread last drops its reference to the ThenValue,
+ // which may or may not be ok.
+ mThisVal = nullptr;
+ }
+
+ private:
+ RefPtr<ThisType>
+ mThisVal; // Only accessed and refcounted on dispatch thread.
+ ResolveMethodType mResolveMethod;
+ RejectMethodType mRejectMethod;
+ RefPtr<typename PromiseType::Private> mCompletionPromise;
+ };
+
+ template <typename ThisType, typename ResolveRejectMethodType>
+ class ThenValue<ThisType*, ResolveRejectMethodType> : public ThenValueBase {
+ friend class ThenCommand<ThenValue>;
+
+ using R1 = typename RemoveSmartPointer<typename detail::MethodTrait<
+ ResolveRejectMethodType>::ReturnType>::Type;
+ using SupportChaining =
+ std::integral_constant<bool, IsMozPromise<R1>::value>;
+
+ // Fall back to MozPromise when promise chaining is not supported to make
+ // code compile.
+ using PromiseType =
+ std::conditional_t<SupportChaining::value, R1, MozPromise>;
+
+ public:
+ ThenValue(nsISerialEventTarget* aResponseTarget, ThisType* aThisVal,
+ ResolveRejectMethodType aResolveRejectMethod,
+ const char* aCallSite)
+ : ThenValueBase(aResponseTarget, aCallSite),
+ mThisVal(aThisVal),
+ mResolveRejectMethod(aResolveRejectMethod) {}
+
+ void Disconnect() override {
+ ThenValueBase::Disconnect();
+
+ // If a Request has been disconnected, we don't guarantee that the
+ // resolve/reject runnable will be dispatched. Null out our refcounted
+ // this-value now so that it's released predictably on the dispatch
+ // thread.
+ mThisVal = nullptr;
+ }
+
+ protected:
+ MozPromiseBase* CompletionPromise() const override {
+ return mCompletionPromise;
+ }
+
+ void DoResolveOrRejectInternal(ResolveOrRejectValue& aValue) override {
+ InvokeCallbackMethod<SupportChaining::value>(
+ mThisVal.get(), mResolveRejectMethod, MaybeMove(aValue),
+ std::move(mCompletionPromise));
+
+ // Null out mThisVal after invoking the callback so that any references
+ // are released predictably on the dispatch thread. Otherwise, it would be
+ // released on whatever thread last drops its reference to the ThenValue,
+ // which may or may not be ok.
+ mThisVal = nullptr;
+ }
+
+ private:
+ RefPtr<ThisType>
+ mThisVal; // Only accessed and refcounted on dispatch thread.
+ ResolveRejectMethodType mResolveRejectMethod;
+ RefPtr<typename PromiseType::Private> mCompletionPromise;
+ };
+
+ // NB: We could use std::function here instead of a template if it were
+ // supported. :-(
+ template <typename ResolveFunction, typename RejectFunction>
+ class ThenValue<ResolveFunction, RejectFunction> : public ThenValueBase {
+ friend class ThenCommand<ThenValue>;
+
+ using R1 = typename RemoveSmartPointer<
+ typename detail::MethodTrait<ResolveFunction>::ReturnType>::Type;
+ using R2 = typename RemoveSmartPointer<
+ typename detail::MethodTrait<RejectFunction>::ReturnType>::Type;
+ using SupportChaining =
+ std::integral_constant<bool, IsMozPromise<R1>::value &&
+ std::is_same_v<R1, R2>>;
+
+ // Fall back to MozPromise when promise chaining is not supported to make
+ // code compile.
+ using PromiseType =
+ std::conditional_t<SupportChaining::value, R1, MozPromise>;
+
+ public:
+ ThenValue(nsISerialEventTarget* aResponseTarget,
+ ResolveFunction&& aResolveFunction,
+ RejectFunction&& aRejectFunction, const char* aCallSite)
+ : ThenValueBase(aResponseTarget, aCallSite) {
+ mResolveFunction.emplace(std::move(aResolveFunction));
+ mRejectFunction.emplace(std::move(aRejectFunction));
+ }
+
+ void Disconnect() override {
+ ThenValueBase::Disconnect();
+
+ // If a Request has been disconnected, we don't guarantee that the
+ // resolve/reject runnable will be dispatched. Destroy our callbacks
+      // now so that any references in closures are released predictably on
+ // the dispatch thread.
+ mResolveFunction.reset();
+ mRejectFunction.reset();
+ }
+
+ protected:
+ MozPromiseBase* CompletionPromise() const override {
+ return mCompletionPromise;
+ }
+
+ void DoResolveOrRejectInternal(ResolveOrRejectValue& aValue) override {
+ // Note: The usage of InvokeCallbackMethod here requires that
+ // ResolveFunction/RejectFunction are capture-lambdas (i.e. anonymous
+ // classes with ::operator()), since it allows us to share code more
+ // easily. We could fix this if need be, though it's quite easy to work
+ // around by just capturing something.
+ if (aValue.IsResolve()) {
+ InvokeCallbackMethod<SupportChaining::value>(
+ mResolveFunction.ptr(), &ResolveFunction::operator(),
+ MaybeMove(aValue.ResolveValue()), std::move(mCompletionPromise));
+ } else {
+ InvokeCallbackMethod<SupportChaining::value>(
+ mRejectFunction.ptr(), &RejectFunction::operator(),
+ MaybeMove(aValue.RejectValue()), std::move(mCompletionPromise));
+ }
+
+ // Destroy callbacks after invocation so that any references in closures
+ // are released predictably on the dispatch thread. Otherwise, they would
+ // be released on whatever thread last drops its reference to the
+ // ThenValue, which may or may not be ok.
+ mResolveFunction.reset();
+ mRejectFunction.reset();
+ }
+
+ private:
+ Maybe<ResolveFunction>
+ mResolveFunction; // Only accessed and deleted on dispatch thread.
+ Maybe<RejectFunction>
+ mRejectFunction; // Only accessed and deleted on dispatch thread.
+ RefPtr<typename PromiseType::Private> mCompletionPromise;
+ };
+
+ template <typename ResolveRejectFunction>
+ class ThenValue<ResolveRejectFunction> : public ThenValueBase {
+ friend class ThenCommand<ThenValue>;
+
+ using R1 = typename RemoveSmartPointer<
+ typename detail::MethodTrait<ResolveRejectFunction>::ReturnType>::Type;
+ using SupportChaining =
+ std::integral_constant<bool, IsMozPromise<R1>::value>;
+
+ // Fall back to MozPromise when promise chaining is not supported to make
+ // code compile.
+ using PromiseType =
+ std::conditional_t<SupportChaining::value, R1, MozPromise>;
+
+ public:
+ ThenValue(nsISerialEventTarget* aResponseTarget,
+ ResolveRejectFunction&& aResolveRejectFunction,
+ const char* aCallSite)
+ : ThenValueBase(aResponseTarget, aCallSite) {
+ mResolveRejectFunction.emplace(std::move(aResolveRejectFunction));
+ }
+
+ void Disconnect() override {
+ ThenValueBase::Disconnect();
+
+ // If a Request has been disconnected, we don't guarantee that the
+ // resolve/reject runnable will be dispatched. Destroy our callbacks
+      // now so that any references in closures are released predictably on
+ // the dispatch thread.
+ mResolveRejectFunction.reset();
+ }
+
+ protected:
+ MozPromiseBase* CompletionPromise() const override {
+ return mCompletionPromise;
+ }
+
+ void DoResolveOrRejectInternal(ResolveOrRejectValue& aValue) override {
+ // Note: The usage of InvokeCallbackMethod here requires that
+      // ResolveRejectFunction is a capture-lambda (i.e. an anonymous
+      // class with ::operator()), since it allows us to share code more
+ // easily. We could fix this if need be, though it's quite easy to work
+ // around by just capturing something.
+ InvokeCallbackMethod<SupportChaining::value>(
+ mResolveRejectFunction.ptr(), &ResolveRejectFunction::operator(),
+ MaybeMove(aValue), std::move(mCompletionPromise));
+
+ // Destroy callbacks after invocation so that any references in closures
+ // are released predictably on the dispatch thread. Otherwise, they would
+ // be released on whatever thread last drops its reference to the
+ // ThenValue, which may or may not be ok.
+ mResolveRejectFunction.reset();
+ }
+
+ private:
+ Maybe<ResolveRejectFunction>
+ mResolveRejectFunction; // Only accessed and deleted on dispatch
+ // thread.
+ RefPtr<typename PromiseType::Private> mCompletionPromise;
+ };
+
+ public:
+ void ThenInternal(already_AddRefed<ThenValueBase> aThenValue,
+ const char* aCallSite) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ RefPtr<ThenValueBase> thenValue = aThenValue;
+ MutexAutoLock lock(mMutex);
+ MOZ_DIAGNOSTIC_ASSERT(
+ !IsExclusive || !mHaveRequest,
+ "Using an exclusive promise in a non-exclusive fashion");
+ mHaveRequest = true;
+ PROMISE_LOG("%s invoking Then() [this=%p, aThenValue=%p, isPending=%d]",
+ aCallSite, this, thenValue.get(), (int)IsPending());
+ if (!IsPending()) {
+ thenValue->Dispatch(this);
+ } else {
+ mThenValues.AppendElement(thenValue.forget());
+ }
+ }
+
+ protected:
+ /*
+ * A command object to store all information needed to make a request to
+ * the promise. This allows us to delay the request until further use is
+ * known (whether it is ->Then() again for more promise chaining or ->Track()
+ * to terminate chaining and issue the request).
+ *
+ * This allows a unified syntax for promise chaining and disconnection
+ * and feels more like its JS counterpart.
+ */
+ template <typename ThenValueType>
+ class ThenCommand {
+ // Allow Promise1::ThenCommand to access the private constructor,
+ // Promise2::ThenCommand(ThenCommand&&).
+ template <typename, typename, bool>
+ friend class MozPromise;
+
+ using PromiseType = typename ThenValueType::PromiseType;
+ using Private = typename PromiseType::Private;
+
+ ThenCommand(const char* aCallSite,
+ already_AddRefed<ThenValueType> aThenValue,
+ MozPromise* aReceiver)
+ : mCallSite(aCallSite), mThenValue(aThenValue), mReceiver(aReceiver) {}
+
+ ThenCommand(ThenCommand&& aOther) = default;
+
+ public:
+ ~ThenCommand() {
+ // Issue the request now if the return value of Then() is not used.
+ if (mThenValue) {
+ mReceiver->ThenInternal(mThenValue.forget(), mCallSite);
+ }
+ }
+
+ // Allow RefPtr<MozPromise> p = somePromise->Then();
+ // p->Then(thread1, ...);
+ // p->Then(thread2, ...);
+ operator RefPtr<PromiseType>() {
+ static_assert(
+ ThenValueType::SupportChaining::value,
+ "The resolve/reject callback needs to return a RefPtr<MozPromise> "
+ "in order to do promise chaining.");
+
+      // mCompletionPromise must be created before ThenInternal() to avoid a race.
+ RefPtr<Private> p =
+ new Private("<completion promise>", true /* aIsCompletionPromise */);
+ mThenValue->mCompletionPromise = p;
+ // Note ThenInternal() might nullify mCompletionPromise before return.
+ // So we need to return p instead of mCompletionPromise.
+ mReceiver->ThenInternal(mThenValue.forget(), mCallSite);
+ return p;
+ }
+
+ template <typename... Ts>
+ auto Then(Ts&&... aArgs) -> decltype(std::declval<PromiseType>().Then(
+ std::forward<Ts>(aArgs)...)) {
+ return static_cast<RefPtr<PromiseType>>(*this)->Then(
+ std::forward<Ts>(aArgs)...);
+ }
+
+ void Track(MozPromiseRequestHolder<MozPromise>& aRequestHolder) {
+ aRequestHolder.Track(do_AddRef(mThenValue));
+ mReceiver->ThenInternal(mThenValue.forget(), mCallSite);
+ }
+
+ // Allow calling ->Then() again for more promise chaining or ->Track() to
+ // end chaining and track the request for future disconnection.
+ ThenCommand* operator->() { return this; }
+
+ private:
+ const char* mCallSite;
+ RefPtr<ThenValueType> mThenValue;
+ RefPtr<MozPromise> mReceiver;
+ };
+
+ public:
+ template <typename ThisType, typename... Methods,
+ typename ThenValueType = ThenValue<ThisType*, Methods...>,
+ typename ReturnType = ThenCommand<ThenValueType>>
+ ReturnType Then(nsISerialEventTarget* aResponseTarget, const char* aCallSite,
+ ThisType* aThisVal, Methods... aMethods) {
+ RefPtr<ThenValueType> thenValue =
+ new ThenValueType(aResponseTarget, aThisVal, aMethods..., aCallSite);
+ return ReturnType(aCallSite, thenValue.forget(), this);
+ }
+
+ template <typename... Functions,
+ typename ThenValueType = ThenValue<Functions...>,
+ typename ReturnType = ThenCommand<ThenValueType>>
+ ReturnType Then(nsISerialEventTarget* aResponseTarget, const char* aCallSite,
+ Functions&&... aFunctions) {
+ RefPtr<ThenValueType> thenValue =
+ new ThenValueType(aResponseTarget, std::move(aFunctions)..., aCallSite);
+ return ReturnType(aCallSite, thenValue.forget(), this);
+ }
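+
+  // A usage sketch (mRequest is a hypothetical
+  // MozPromiseRequestHolder<GenericPromise> member):
+  //
+  //   promise->Then(GetCurrentSerialEventTarget(), __func__,
+  //                 [](bool aValue) { /* resolved */ },
+  //                 [](nsresult aRv) { /* rejected */ })
+  //       ->Track(mRequest);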
+
+ void ChainTo(already_AddRefed<Private> aChainedPromise,
+ const char* aCallSite) {
+ MutexAutoLock lock(mMutex);
+ MOZ_DIAGNOSTIC_ASSERT(
+ !IsExclusive || !mHaveRequest,
+ "Using an exclusive promise in a non-exclusive fashion");
+ mHaveRequest = true;
+ RefPtr<Private> chainedPromise = aChainedPromise;
+ PROMISE_LOG(
+ "%s invoking Chain() [this=%p, chainedPromise=%p, isPending=%d]",
+ aCallSite, this, chainedPromise.get(), (int)IsPending());
+
+ // We want to use the same type of dispatching method with the chained
+ // promises.
+
+ // We need to ensure that the UseSynchronousTaskDispatch branch isn't taken
+ // at compilation time to ensure we're not triggering the static_assert in
+ // UseSynchronousTaskDispatch method. if constexpr (IsExclusive) ensures
+ // that.
+ if (mUseDirectTaskDispatch) {
+ chainedPromise->UseDirectTaskDispatch(aCallSite);
+ } else if constexpr (IsExclusive) {
+ if (mUseSynchronousTaskDispatch) {
+ chainedPromise->UseSynchronousTaskDispatch(aCallSite);
+ }
+ } else {
+ chainedPromise->SetTaskPriority(mPriority, aCallSite);
+ }
+
+ if (!IsPending()) {
+ ForwardTo(chainedPromise);
+ } else {
+ mChainedPromises.AppendElement(chainedPromise);
+ }
+ }
+
+# ifdef MOZ_WIDGET_ANDROID
+ // Creates a C++ MozPromise from its Java counterpart, GeckoResult.
+ [[nodiscard]] static RefPtr<MozPromise> FromGeckoResult(
+ java::GeckoResult::Param aGeckoResult) {
+ using jni::GeckoResultCallback;
+ RefPtr<Private> p = new Private("GeckoResult Glue", false);
+ auto resolve = GeckoResultCallback::CreateAndAttach<ResolveValueType>(
+ [p](ResolveValueType&& aArg) {
+ p->Resolve(MaybeMove(aArg), __func__);
+ });
+ auto reject = GeckoResultCallback::CreateAndAttach<RejectValueType>(
+ [p](RejectValueType&& aArg) { p->Reject(MaybeMove(aArg), __func__); });
+ aGeckoResult->NativeThen(resolve, reject);
+ return p;
+ }
+# endif
+
+ // Creates a C++ MozPromise from its JS counterpart, dom::Promise.
+  // FromDomPromise currently only supports primitive resolve types
+  // (int8/16/32, float, double), and the reject value type must be nsresult.
+  // To use, please include MozPromiseInlines.h.
+ static RefPtr<MozPromise> FromDomPromise(dom::Promise* aDOMPromise);
+
+ // Note we expose the function AssertIsDead() instead of IsDead() since
+ // checking IsDead() is a data race in the situation where the request is not
+ // dead. Therefore we enforce the form |Assert(IsDead())| by exposing
+ // AssertIsDead() only.
+ void AssertIsDead() override {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ for (auto&& then : mThenValues) {
+ then->AssertIsDead();
+ }
+ for (auto&& chained : mChainedPromises) {
+ chained->AssertIsDead();
+ }
+ }
+
+ bool IsResolved() const { return mValue.IsResolve(); }
+
+ protected:
+ bool IsPending() const { return mValue.IsNothing(); }
+
+ ResolveOrRejectValue& Value() {
+ // This method should only be called once the value has stabilized. As
+ // such, we don't need to acquire the lock here.
+ MOZ_DIAGNOSTIC_ASSERT(!IsPending());
+ return mValue;
+ }
+
+ void DispatchAll() {
+ mMutex.AssertCurrentThreadOwns();
+ for (auto&& thenValue : mThenValues) {
+ thenValue->Dispatch(this);
+ }
+ mThenValues.Clear();
+
+ for (auto&& chainedPromise : mChainedPromises) {
+ ForwardTo(chainedPromise);
+ }
+ mChainedPromises.Clear();
+ }
+
+ void ForwardTo(Private* aOther) {
+ MOZ_ASSERT(!IsPending());
+ if (mValue.IsResolve()) {
+ aOther->Resolve(MaybeMove(mValue.ResolveValue()), "<chained promise>");
+ } else {
+ aOther->Reject(MaybeMove(mValue.RejectValue()), "<chained promise>");
+ }
+ }
+
+ virtual ~MozPromise() {
+ PROMISE_LOG("MozPromise::~MozPromise [this=%p]", this);
+ AssertIsDead();
+    // We can't guarantee a completion promise will always be resolved or
+ // rejected since ResolveOrRejectRunnable might not run when dispatch fails.
+ if (!mIsCompletionPromise) {
+ MOZ_ASSERT(!IsPending());
+ MOZ_ASSERT(mThenValues.IsEmpty());
+ MOZ_ASSERT(mChainedPromises.IsEmpty());
+ }
+# ifdef PROMISE_DEBUG
+ mMagic1 = 0;
+ mMagic2 = 0;
+ mMagic3 = 0;
+ mMagic4 = nullptr;
+# endif
+  }
+
+ const char* mCreationSite; // For logging
+ Mutex mMutex MOZ_UNANNOTATED;
+ ResolveOrRejectValue mValue;
+ bool mUseSynchronousTaskDispatch = false;
+ bool mUseDirectTaskDispatch = false;
+ uint32_t mPriority = nsIRunnablePriority::PRIORITY_NORMAL;
+# ifdef PROMISE_DEBUG
+ uint32_t mMagic1 = sMagic;
+# endif
+  // Runs on Mozilla's Try server show we never have more than 3 elements when
+  // IsExclusive is false, so '3' is a good inline capacity to avoid heap
+  // allocation in most cases.
+ AutoTArray<RefPtr<ThenValueBase>, IsExclusive ? 1 : 3> mThenValues;
+# ifdef PROMISE_DEBUG
+ uint32_t mMagic2 = sMagic;
+# endif
+ nsTArray<RefPtr<Private>> mChainedPromises;
+# ifdef PROMISE_DEBUG
+ uint32_t mMagic3 = sMagic;
+# endif
+ bool mHaveRequest;
+ const bool mIsCompletionPromise;
+# ifdef PROMISE_DEBUG
+ void* mMagic4;
+# endif
+};
+
+template <typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+class MozPromise<ResolveValueT, RejectValueT, IsExclusive>::Private
+ : public MozPromise<ResolveValueT, RejectValueT, IsExclusive> {
+ public:
+ explicit Private(const char* aCreationSite, bool aIsCompletionPromise = false)
+ : MozPromise(aCreationSite, aIsCompletionPromise) {}
+
+ template <typename ResolveValueT_>
+ void Resolve(ResolveValueT_&& aResolveValue, const char* aResolveSite) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ PROMISE_LOG("%s resolving MozPromise (%p created at %s)", aResolveSite,
+ this, mCreationSite);
+ if (!IsPending()) {
+ PROMISE_LOG(
+ "%s ignored already resolved or rejected MozPromise (%p created at "
+ "%s)",
+ aResolveSite, this, mCreationSite);
+ return;
+ }
+ mValue.SetResolve(std::forward<ResolveValueT_>(aResolveValue));
+ DispatchAll();
+ }
+
+ template <typename RejectValueT_>
+ void Reject(RejectValueT_&& aRejectValue, const char* aRejectSite) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ PROMISE_LOG("%s rejecting MozPromise (%p created at %s)", aRejectSite, this,
+ mCreationSite);
+ if (!IsPending()) {
+ PROMISE_LOG(
+ "%s ignored already resolved or rejected MozPromise (%p created at "
+ "%s)",
+ aRejectSite, this, mCreationSite);
+ return;
+ }
+ mValue.SetReject(std::forward<RejectValueT_>(aRejectValue));
+ DispatchAll();
+ }
+
+ template <typename ResolveOrRejectValue_>
+ void ResolveOrReject(ResolveOrRejectValue_&& aValue, const char* aSite) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ PROMISE_LOG("%s resolveOrRejecting MozPromise (%p created at %s)", aSite,
+ this, mCreationSite);
+ if (!IsPending()) {
+ PROMISE_LOG(
+ "%s ignored already resolved or rejected MozPromise (%p created at "
+ "%s)",
+ aSite, this, mCreationSite);
+ return;
+ }
+ mValue = std::forward<ResolveOrRejectValue_>(aValue);
+ DispatchAll();
+ }
+
+  // If the caller and target are both on the same thread, run the resolve
+ // or reject callback synchronously. Otherwise, the task will be dispatched
+ // via the target Dispatch method.
+ void UseSynchronousTaskDispatch(const char* aSite) {
+ static_assert(
+ IsExclusive,
+ "Synchronous dispatch can only be used with exclusive promises");
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ PROMISE_LOG("%s UseSynchronousTaskDispatch MozPromise (%p created at %s)",
+ aSite, this, mCreationSite);
+ MOZ_ASSERT(IsPending(),
+ "A Promise must not have been already resolved or rejected to "
+ "set dispatch state");
+ mUseSynchronousTaskDispatch = true;
+ }
+
+ // If the caller and target are both on the same thread, run the
+ // resolve/reject callback off the direct task queue instead. This avoids a
+ // full trip to the back of the event queue for each additional asynchronous
+ // step when using MozPromise, and is similar (but not identical to) the
+ // microtask semantics of JS promises.
+ void UseDirectTaskDispatch(const char* aSite) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ PROMISE_LOG("%s UseDirectTaskDispatch MozPromise (%p created at %s)", aSite,
+ this, mCreationSite);
+ MOZ_ASSERT(IsPending(),
+ "A Promise must not have been already resolved or rejected to "
+ "set dispatch state");
+ MOZ_ASSERT(!mUseSynchronousTaskDispatch,
+ "Promise already set for synchronous dispatch");
+ mUseDirectTaskDispatch = true;
+ }
+
+ // If the resolve/reject will be handled on a thread supporting priorities,
+ // one may want to tweak the priority of the task by passing a
+ // nsIRunnablePriority::PRIORITY_* to SetTaskPriority.
+ void SetTaskPriority(uint32_t aPriority, const char* aSite) {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic &&
+ mMagic3 == sMagic && mMagic4 == &mMutex);
+ MutexAutoLock lock(mMutex);
+ PROMISE_LOG("%s TaskPriority MozPromise (%p created at %s)", aSite, this,
+ mCreationSite);
+ MOZ_ASSERT(IsPending(),
+ "A Promise must not have been already resolved or rejected to "
+ "set dispatch state");
+ MOZ_ASSERT(!mUseSynchronousTaskDispatch,
+ "Promise already set for synchronous dispatch");
+ MOZ_ASSERT(!mUseDirectTaskDispatch,
+ "Promise already set for direct dispatch");
+ mPriority = aPriority;
+ }
+};
+
+// A generic promise type that does the trick for simple use cases.
+typedef MozPromise<bool, nsresult, /* IsExclusive = */ true> GenericPromise;
+
+// A generic, non-exclusive promise type that does the trick for simple use
+// cases.
+typedef MozPromise<bool, nsresult, /* IsExclusive = */ false>
+ GenericNonExclusivePromise;
+
+/*
+ * Class to encapsulate a promise for a particular role. Use this as the member
+ * variable for a class whose method returns a promise.
+ */
+template <typename PromiseType, typename ImplType>
+class MozPromiseHolderBase {
+ public:
+ MozPromiseHolderBase() = default;
+
+ MozPromiseHolderBase(MozPromiseHolderBase&& aOther) = default;
+ MozPromiseHolderBase& operator=(MozPromiseHolderBase&& aOther) = default;
+
+ ~MozPromiseHolderBase() { MOZ_ASSERT(!mPromise); }
+
+ already_AddRefed<PromiseType> Ensure(const char* aMethodName) {
+ static_cast<ImplType*>(this)->Check();
+ if (!mPromise) {
+ mPromise = new (typename PromiseType::Private)(aMethodName);
+ }
+ RefPtr<PromiseType> p = mPromise.get();
+ return p.forget();
+ }
+
+ bool IsEmpty() const {
+ static_cast<const ImplType*>(this)->Check();
+ return !mPromise;
+ }
+
+ already_AddRefed<typename PromiseType::Private> Steal() {
+ static_cast<ImplType*>(this)->Check();
+ return mPromise.forget();
+ }
+
+ template <typename ResolveValueType_>
+ void Resolve(ResolveValueType_&& aResolveValue, const char* aMethodName) {
+ static_assert(std::is_convertible_v<ResolveValueType_,
+ typename PromiseType::ResolveValueType>,
+ "Resolve() argument must be implicitly convertible to "
+ "MozPromise's ResolveValueT");
+
+ static_cast<ImplType*>(this)->Check();
+ MOZ_ASSERT(mPromise);
+ mPromise->Resolve(std::forward<ResolveValueType_>(aResolveValue),
+ aMethodName);
+ mPromise = nullptr;
+ }
+
+ template <typename ResolveValueType_>
+ void ResolveIfExists(ResolveValueType_&& aResolveValue,
+ const char* aMethodName) {
+ if (!IsEmpty()) {
+ Resolve(std::forward<ResolveValueType_>(aResolveValue), aMethodName);
+ }
+ }
+
+ template <typename RejectValueType_>
+ void Reject(RejectValueType_&& aRejectValue, const char* aMethodName) {
+ static_assert(std::is_convertible_v<RejectValueType_,
+ typename PromiseType::RejectValueType>,
+ "Reject() argument must be implicitly convertible to "
+ "MozPromise's RejectValueT");
+
+ static_cast<ImplType*>(this)->Check();
+ MOZ_ASSERT(mPromise);
+ mPromise->Reject(std::forward<RejectValueType_>(aRejectValue), aMethodName);
+ mPromise = nullptr;
+ }
+
+ template <typename RejectValueType_>
+ void RejectIfExists(RejectValueType_&& aRejectValue,
+ const char* aMethodName) {
+ if (!IsEmpty()) {
+ Reject(std::forward<RejectValueType_>(aRejectValue), aMethodName);
+ }
+ }
+
+ template <typename ResolveOrRejectValueType_>
+ void ResolveOrReject(ResolveOrRejectValueType_&& aValue,
+ const char* aMethodName) {
+ static_cast<ImplType*>(this)->Check();
+ MOZ_ASSERT(mPromise);
+ mPromise->ResolveOrReject(std::forward<ResolveOrRejectValueType_>(aValue),
+ aMethodName);
+ mPromise = nullptr;
+ }
+
+ template <typename ResolveOrRejectValueType_>
+ void ResolveOrRejectIfExists(ResolveOrRejectValueType_&& aValue,
+ const char* aMethodName) {
+ if (!IsEmpty()) {
+ ResolveOrReject(std::forward<ResolveOrRejectValueType_>(aValue),
+ aMethodName);
+ }
+ }
+
+ void UseSynchronousTaskDispatch(const char* aSite) {
+ MOZ_ASSERT(mPromise);
+ mPromise->UseSynchronousTaskDispatch(aSite);
+ }
+
+ void UseDirectTaskDispatch(const char* aSite) {
+ MOZ_ASSERT(mPromise);
+ mPromise->UseDirectTaskDispatch(aSite);
+ }
+
+ void SetTaskPriority(uint32_t aPriority, const char* aSite) {
+ MOZ_ASSERT(mPromise);
+ mPromise->SetTaskPriority(aPriority, aSite);
+ }
+
+ private:
+ RefPtr<typename PromiseType::Private> mPromise;
+};
+
+template <typename PromiseType>
+class MozPromiseHolder
+ : public MozPromiseHolderBase<PromiseType, MozPromiseHolder<PromiseType>> {
+ public:
+ using MozPromiseHolderBase<
+ PromiseType, MozPromiseHolder<PromiseType>>::MozPromiseHolderBase;
+  static constexpr void Check() {}
+};
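+
+// Example (a sketch; MyCache and mPromiseHolder are hypothetical names):
+//
+//   class MyCache {
+//    public:
+//     RefPtr<GenericPromise> WaitForFlush() {
+//       return mPromiseHolder.Ensure(__func__);
+//     }
+//     void NotifyFlushed() {
+//       mPromiseHolder.ResolveIfExists(true, __func__);
+//     }
+//    private:
+//     MozPromiseHolder<GenericPromise> mPromiseHolder;
+//   };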
+
+template <typename PromiseType>
+class MozMonitoredPromiseHolder
+ : public MozPromiseHolderBase<PromiseType,
+ MozMonitoredPromiseHolder<PromiseType>> {
+ public:
+ // Provide a Monitor that should always be held when accessing this instance.
+ explicit MozMonitoredPromiseHolder(Monitor* const aMonitor)
+ : mMonitor(aMonitor) {
+ MOZ_ASSERT(aMonitor);
+ }
+
+ MozMonitoredPromiseHolder(MozMonitoredPromiseHolder&& aOther) = delete;
+ MozMonitoredPromiseHolder& operator=(MozMonitoredPromiseHolder&& aOther) =
+ delete;
+
+ void Check() const { mMonitor->AssertCurrentThreadOwns(); }
+
+ private:
+ Monitor* const mMonitor;
+};
+
+/*
+ * Class to encapsulate a MozPromise::Request reference. Use this as the member
+ * variable for a class waiting on a MozPromise.
+ */
+template <typename PromiseType>
+class MozPromiseRequestHolder {
+ public:
+ MozPromiseRequestHolder() = default;
+ ~MozPromiseRequestHolder() { MOZ_ASSERT(!mRequest); }
+
+ void Track(already_AddRefed<typename PromiseType::Request> aRequest) {
+ MOZ_DIAGNOSTIC_ASSERT(!Exists());
+ mRequest = aRequest;
+ }
+
+ void Complete() {
+ MOZ_DIAGNOSTIC_ASSERT(Exists());
+ mRequest = nullptr;
+ }
+
+ // Disconnects and forgets an outstanding promise. The resolve/reject methods
+ // will never be called.
+ void Disconnect() {
+ MOZ_ASSERT(Exists());
+ mRequest->Disconnect();
+ mRequest = nullptr;
+ }
+
+ void DisconnectIfExists() {
+ if (Exists()) {
+ Disconnect();
+ }
+ }
+
+ bool Exists() const { return !!mRequest; }
+
+ private:
+ RefPtr<typename PromiseType::Request> mRequest;
+};
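+
+// A usage sketch (mTarget and mRequest are hypothetical members):
+//
+//   promise->Then(mTarget, __func__,
+//                 [this](bool) { mRequest.Complete(); /* ... */ },
+//                 [this](nsresult) { mRequest.Complete(); /* ... */ })
+//       ->Track(mRequest);
+//
+//   // Later, if the consumer goes away before the promise settles:
+//   mRequest.DisconnectIfExists();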
+
+// Asynchronous Potentially-Cross-Thread Method Calls.
+//
+// This machinery allows callers to schedule a promise-returning function
+// (a method and object, or a function object like a lambda) to be invoked
+// asynchronously on a given thread, while at the same time receiving a
+// promise upon which to invoke Then() immediately. InvokeAsync dispatches a
+// task to invoke the function on the proper thread and also chain the
+// resulting promise to the one that the caller received, so that resolve/
+// reject values are forwarded through.
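+//
+// For example (a sketch; MyWorker::ComputeAsync is a hypothetical method
+// returning RefPtr<GenericPromise>, invoked on mWorkerThread):
+//
+//   RefPtr<GenericPromise> p = InvokeAsync(
+//       mWorkerThread, mWorker.get(), __func__, &MyWorker::ComputeAsync);
+//   p->Then(GetCurrentSerialEventTarget(), __func__,
+//           [](bool) { /* ... */ }, [](nsresult) { /* ... */ });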
+
+namespace detail {
+
+// Non-templated base class to allow us to use MOZ_COUNT_{C,D}TOR, which cause
+// assertions when used on templated types.
+class MethodCallBase {
+ public:
+ MOZ_COUNTED_DEFAULT_CTOR(MethodCallBase)
+ MOZ_COUNTED_DTOR_VIRTUAL(MethodCallBase)
+};
+
+template <typename PromiseType, typename MethodType, typename ThisType,
+ typename... Storages>
+class MethodCall : public MethodCallBase {
+ public:
+ template <typename... Args>
+ MethodCall(MethodType aMethod, ThisType* aThisVal, Args&&... aArgs)
+ : mMethod(aMethod),
+ mThisVal(aThisVal),
+ mArgs(std::forward<Args>(aArgs)...) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "Storages and Args should have equal sizes");
+ }
+
+ RefPtr<PromiseType> Invoke() { return mArgs.apply(mThisVal.get(), mMethod); }
+
+ private:
+ MethodType mMethod;
+ RefPtr<ThisType> mThisVal;
+ RunnableMethodArguments<Storages...> mArgs;
+};
+
+template <typename PromiseType, typename MethodType, typename ThisType,
+ typename... Storages>
+class ProxyRunnable : public CancelableRunnable {
+ public:
+ ProxyRunnable(
+ typename PromiseType::Private* aProxyPromise,
+ MethodCall<PromiseType, MethodType, ThisType, Storages...>* aMethodCall)
+ : CancelableRunnable("detail::ProxyRunnable"),
+ mProxyPromise(aProxyPromise),
+ mMethodCall(aMethodCall) {}
+
+ NS_IMETHOD Run() override {
+ RefPtr<PromiseType> p = mMethodCall->Invoke();
+ mMethodCall = nullptr;
+ p->ChainTo(mProxyPromise.forget(), "<Proxy Promise>");
+ return NS_OK;
+ }
+
+ nsresult Cancel() override { return Run(); }
+
+ private:
+ RefPtr<typename PromiseType::Private> mProxyPromise;
+ UniquePtr<MethodCall<PromiseType, MethodType, ThisType, Storages...>>
+ mMethodCall;
+};
+
+template <typename... Storages, typename PromiseType, typename ThisType,
+ typename... ArgTypes, typename... ActualArgTypes>
+static RefPtr<PromiseType> InvokeAsyncImpl(
+ nsISerialEventTarget* aTarget, ThisType* aThisVal, const char* aCallerName,
+ RefPtr<PromiseType> (ThisType::*aMethod)(ArgTypes...),
+ ActualArgTypes&&... aArgs) {
+ MOZ_ASSERT(aTarget);
+
+ typedef RefPtr<PromiseType> (ThisType::*MethodType)(ArgTypes...);
+ typedef detail::MethodCall<PromiseType, MethodType, ThisType, Storages...>
+ MethodCallType;
+ typedef detail::ProxyRunnable<PromiseType, MethodType, ThisType, Storages...>
+ ProxyRunnableType;
+
+ MethodCallType* methodCall = new MethodCallType(
+ aMethod, aThisVal, std::forward<ActualArgTypes>(aArgs)...);
+ RefPtr<typename PromiseType::Private> p =
+ new (typename PromiseType::Private)(aCallerName);
+ RefPtr<ProxyRunnableType> r = new ProxyRunnableType(p, methodCall);
+ aTarget->Dispatch(r.forget());
+ return p;
+}
+
+constexpr bool Any() { return false; }
+
+template <typename T1>
+constexpr bool Any(T1 a) {
+ return static_cast<bool>(a);
+}
+
+template <typename T1, typename... Ts>
+constexpr bool Any(T1 a, Ts... aOthers) {
+ return a || Any(aOthers...);
+}
+
+} // namespace detail
+
+// InvokeAsync with explicitly-specified storages.
+// See ParameterStorage in nsThreadUtils.h for help.
+template <typename... Storages, typename PromiseType, typename ThisType,
+ typename... ArgTypes, typename... ActualArgTypes,
+ std::enable_if_t<sizeof...(Storages) != 0, int> = 0>
+static RefPtr<PromiseType> InvokeAsync(
+ nsISerialEventTarget* aTarget, ThisType* aThisVal, const char* aCallerName,
+ RefPtr<PromiseType> (ThisType::*aMethod)(ArgTypes...),
+ ActualArgTypes&&... aArgs) {
+ static_assert(
+ sizeof...(Storages) == sizeof...(ArgTypes),
+ "Provided Storages and method's ArgTypes should have equal sizes");
+ static_assert(sizeof...(Storages) == sizeof...(ActualArgTypes),
+ "Provided Storages and ActualArgTypes should have equal sizes");
+ return detail::InvokeAsyncImpl<Storages...>(
+ aTarget, aThisVal, aCallerName, aMethod,
+ std::forward<ActualArgTypes>(aArgs)...);
+}
+
+// InvokeAsync with no explicitly-specified storages, will copy arguments and
+// then move them out of the runnable into the target method parameters.
+template <typename... Storages, typename PromiseType, typename ThisType,
+ typename... ArgTypes, typename... ActualArgTypes,
+ std::enable_if_t<sizeof...(Storages) == 0, int> = 0>
+static RefPtr<PromiseType> InvokeAsync(
+ nsISerialEventTarget* aTarget, ThisType* aThisVal, const char* aCallerName,
+ RefPtr<PromiseType> (ThisType::*aMethod)(ArgTypes...),
+ ActualArgTypes&&... aArgs) {
+ static_assert(
+ !detail::Any(
+ std::is_pointer_v<std::remove_reference_t<ActualArgTypes>>...),
+ "Cannot pass pointer types through InvokeAsync, Storages must be "
+ "provided");
+ static_assert(sizeof...(ArgTypes) == sizeof...(ActualArgTypes),
+ "Method's ArgTypes and ActualArgTypes should have equal sizes");
+ return detail::InvokeAsyncImpl<
+ StoreCopyPassByRRef<std::decay_t<ActualArgTypes>>...>(
+ aTarget, aThisVal, aCallerName, aMethod,
+ std::forward<ActualArgTypes>(aArgs)...);
+}
+
+namespace detail {
+
+template <typename Function, typename PromiseType>
+class ProxyFunctionRunnable : public CancelableRunnable {
+ using FunctionStorage = std::decay_t<Function>;
+
+ public:
+ template <typename F>
+ ProxyFunctionRunnable(typename PromiseType::Private* aProxyPromise,
+ F&& aFunction)
+ : CancelableRunnable("detail::ProxyFunctionRunnable"),
+ mProxyPromise(aProxyPromise),
+ mFunction(new FunctionStorage(std::forward<F>(aFunction))) {}
+
+ NS_IMETHOD Run() override {
+ RefPtr<PromiseType> p = (*mFunction)();
+ mFunction = nullptr;
+ p->ChainTo(mProxyPromise.forget(), "<Proxy Promise>");
+ return NS_OK;
+ }
+
+ nsresult Cancel() override { return Run(); }
+
+ private:
+ RefPtr<typename PromiseType::Private> mProxyPromise;
+ UniquePtr<FunctionStorage> mFunction;
+};
+
+// Note: The following struct and function are not for public consumption (yet?)
+// as we would prefer all calls to pass on-the-spot lambdas (or at least moved
+// function objects). They could be moved outside of detail if really needed.
+
+// We prefer getting function objects by non-lvalue-ref (to avoid copying them
+// and their captures). This struct is a tag that allows the use of objects
+// through lvalue-refs where necessary.
+struct AllowInvokeAsyncFunctionLVRef {};
+
+// Invoke a function object (e.g., lambda or std/mozilla::function)
+// asynchronously; note that the object will be copied if provided by
+// lvalue-ref. Return a promise that the function should eventually resolve or
+// reject.
+template <typename Function>
+static auto InvokeAsync(nsISerialEventTarget* aTarget, const char* aCallerName,
+ AllowInvokeAsyncFunctionLVRef, Function&& aFunction)
+ -> decltype(aFunction()) {
+ static_assert(
+ IsRefcountedSmartPointer<decltype(aFunction())>::value &&
+ IsMozPromise<
+ typename RemoveSmartPointer<decltype(aFunction())>::Type>::value,
+ "Function object must return RefPtr<MozPromise>");
+ MOZ_ASSERT(aTarget);
+ typedef typename RemoveSmartPointer<decltype(aFunction())>::Type PromiseType;
+ typedef detail::ProxyFunctionRunnable<Function, PromiseType>
+ ProxyRunnableType;
+
+ auto p = MakeRefPtr<typename PromiseType::Private>(aCallerName);
+ auto r = MakeRefPtr<ProxyRunnableType>(p, std::forward<Function>(aFunction));
+ aTarget->Dispatch(r.forget());
+ return p;
+}
+
+} // namespace detail
+
+// Invoke a function object (e.g., lambda) asynchronously.
+// Return a promise that the function should eventually resolve or reject.
+template <typename Function>
+static auto InvokeAsync(nsISerialEventTarget* aTarget, const char* aCallerName,
+ Function&& aFunction) -> decltype(aFunction()) {
+ static_assert(!std::is_lvalue_reference_v<Function>,
+ "Function object must not be passed by lvalue-ref (to avoid "
+ "unplanned copies); Consider move()ing the object.");
+ return detail::InvokeAsync(aTarget, aCallerName,
+ detail::AllowInvokeAsyncFunctionLVRef(),
+ std::forward<Function>(aFunction));
+}
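+
+// For example (a sketch; mTarget is a hypothetical nsISerialEventTarget*):
+//
+//   InvokeAsync(mTarget, __func__, []() {
+//     return GenericPromise::CreateAndResolve(true, __func__);
+//   })->Then(GetCurrentSerialEventTarget(), __func__,
+//            [](bool) { /* ... */ }, [](nsresult) { /* ... */ });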
+
+# undef PROMISE_LOG
+# undef PROMISE_ASSERT
+# undef PROMISE_DEBUG
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/MozPromiseInlines.h b/xpcom/threads/MozPromiseInlines.h
new file mode 100644
index 0000000000..def7e90461
--- /dev/null
+++ b/xpcom/threads/MozPromiseInlines.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MozPromiseInlines_h_
+#define MozPromiseInlines_h_
+
+#include <type_traits>
+
+#include "mozilla/MozPromise.h"
+#include "mozilla/dom/PrimitiveConversions.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/PromiseNativeHandler.h"
+
+namespace mozilla {
+
+// Creates a C++ MozPromise from its JS counterpart, dom::Promise.
+// FromDomPromise currently only supports primitive resolve types
+// (int8/16/32, float, double), and the reject value type must be nsresult.
+template <typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+RefPtr<MozPromise<ResolveValueT, RejectValueT, IsExclusive>>
+MozPromise<ResolveValueT, RejectValueT, IsExclusive>::FromDomPromise(
+ dom::Promise* aDOMPromise) {
+ static_assert(std::is_same_v<RejectValueType, nsresult>,
+ "Reject type must be nsresult");
+ RefPtr<Private> p = new Private(__func__);
+ RefPtr<dom::DomPromiseListener> listener = new dom::DomPromiseListener(
+ [p](JSContext* aCx, JS::Handle<JS::Value> aValue) {
+ ResolveValueT value;
+ bool ok = dom::ValueToPrimitive<ResolveValueT,
+ dom::ConversionBehavior::eDefault>(
+ aCx, aValue, "Resolution value", &value);
+ if (!ok) {
+ p->Reject(NS_ERROR_FAILURE, __func__);
+ return;
+ }
+ p->Resolve(value, __func__);
+ },
+ [p](nsresult aError) { p->Reject(aError, __func__); });
+ aDOMPromise->AppendNativeHandler(listener);
+ return p;
+}
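+
+// For example (a sketch; aDomPromise is a hypothetical dom::Promise* that
+// resolves to a 32-bit integer):
+//
+//   using Int32Promise = MozPromise<int32_t, nsresult, true>;
+//   RefPtr<Int32Promise> p = Int32Promise::FromDomPromise(aDomPromise);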
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/Mutex.h b/xpcom/threads/Mutex.h
new file mode 100644
index 0000000000..346e946166
--- /dev/null
+++ b/xpcom/threads/Mutex.h
@@ -0,0 +1,452 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Mutex_h
+#define mozilla_Mutex_h
+
+#include "mozilla/BlockingResourceBase.h"
+#include "mozilla/ThreadSafety.h"
+#include "mozilla/PlatformMutex.h"
+#include "nsISupports.h"
+
+//
+// Provides:
+//
+// - Mutex, a non-recursive mutex
+// - MutexAutoLock, an RAII class for ensuring that Mutexes are properly
+// locked and unlocked
+// - MutexAutoUnlock, complementary sibling to MutexAutoLock
+//
+// - OffTheBooksMutex, a non-recursive mutex that doesn't do leak checking
+// - OffTheBooksMutexAuto{Lock,Unlock} - Like MutexAuto{Lock,Unlock}, but for
+// an OffTheBooksMutex.
+//
+// Using MutexAutoLock/MutexAutoUnlock etc. is MUCH preferred to making bare
+// calls to Lock and Unlock.
+//
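+// A minimal usage sketch (Counter, mMutex, and mCount are hypothetical):
+//
+//   class Counter {
+//    public:
+//     Counter() : mMutex("Counter::mMutex") {}
+//     void Increment() {
+//       MutexAutoLock lock(mMutex);  // unlocked automatically at scope exit
+//       ++mCount;
+//     }
+//    private:
+//     Mutex mMutex;
+//     int mCount = 0;
+//   };
+//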
+namespace mozilla {
+
+/**
+ * OffTheBooksMutex is identical to Mutex, except that OffTheBooksMutex doesn't
+ * include leak checking. Sometimes you want to intentionally "leak" a mutex
+ * until shutdown; in these cases, OffTheBooksMutex is for you.
+ */
+class MOZ_CAPABILITY("mutex") OffTheBooksMutex : public detail::MutexImpl,
+ BlockingResourceBase {
+ public:
+ /**
+ * @param aName A name which can reference this lock
+ **/
+ explicit OffTheBooksMutex(const char* aName)
+ : BlockingResourceBase(aName, eMutex)
+#ifdef DEBUG
+ ,
+ mOwningThread(nullptr)
+#endif
+ {
+ }
+
+ ~OffTheBooksMutex() {
+#ifdef DEBUG
+ MOZ_ASSERT(!mOwningThread, "destroying a still-owned lock!");
+#endif
+ }
+
+#ifndef DEBUG
+ /**
+ * Lock this mutex.
+ **/
+ void Lock() MOZ_CAPABILITY_ACQUIRE() { this->lock(); }
+
+ /**
+ * Try to lock this mutex, returning true if we were successful.
+ **/
+ [[nodiscard]] bool TryLock() MOZ_TRY_ACQUIRE(true) { return this->tryLock(); }
+
+ /**
+ * Unlock this mutex.
+ **/
+ void Unlock() MOZ_CAPABILITY_RELEASE() { this->unlock(); }
+
+ /**
+ * Assert that the current thread owns this mutex in debug builds.
+ *
+ * Does nothing in non-debug builds.
+ **/
+ void AssertCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY(this) {}
+
+ /**
+ * Assert that the current thread does not own this mutex.
+ *
+   * Note that this function is a no-op in both debug and non-debug builds,
+   * due to difficulties in dealing with memory ordering.
+ *
+ * It is therefore mostly useful as documentation.
+ **/
+ void AssertNotCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY(!this) {}
+
+#else
+ void Lock() MOZ_CAPABILITY_ACQUIRE();
+
+ [[nodiscard]] bool TryLock() MOZ_TRY_ACQUIRE(true);
+ void Unlock() MOZ_CAPABILITY_RELEASE();
+
+ void AssertCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY(this);
+ void AssertNotCurrentThreadOwns() const MOZ_ASSERT_CAPABILITY(!this) {
+ // FIXME bug 476536
+ }
+#endif // ifndef DEBUG
+
+ private:
+ OffTheBooksMutex() = delete;
+ OffTheBooksMutex(const OffTheBooksMutex&) = delete;
+ OffTheBooksMutex& operator=(const OffTheBooksMutex&) = delete;
+
+ friend class OffTheBooksCondVar;
+
+#ifdef DEBUG
+ PRThread* mOwningThread;
+#endif
+};
+
+/**
+ * Mutex
+ * When possible, use MutexAutoLock/MutexAutoUnlock to lock/unlock this
+ * mutex within a scope, instead of calling Lock/Unlock directly.
+ */
+class Mutex : public OffTheBooksMutex {
+ public:
+ explicit Mutex(const char* aName) : OffTheBooksMutex(aName) {
+ MOZ_COUNT_CTOR(Mutex);
+ }
+
+ MOZ_COUNTED_DTOR(Mutex)
+
+ private:
+ Mutex() = delete;
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+};
+
+/**
+ * MutexSingleWriter
+ *
+ * Mutex where a single writer exists, so that reads from the same thread
+ * will not generate data races or consistency issues.
+ *
+ * When possible, use MutexAutoLock/MutexAutoUnlock to lock/unlock this
+ * mutex within a scope, instead of calling Lock/Unlock directly.
+ *
+ * This requires an object implementing SingleWriterLockOwner, so we can
+ * do correct-thread checks.
+ */
+// Subclass this in the object owning the mutex
+class SingleWriterLockOwner {
+ public:
+ SingleWriterLockOwner() = default;
+ ~SingleWriterLockOwner() = default;
+
+ virtual bool OnWritingThread() const = 0;
+};
+
+class MutexSingleWriter : public OffTheBooksMutex {
+ public:
+ // aOwner should be the object that contains the mutex, typically. We
+ // will use that object (which must have a lifetime the same or greater
+ // than this object) to verify that we're running on the correct thread,
+ // typically only in DEBUG builds
+ explicit MutexSingleWriter(const char* aName, SingleWriterLockOwner* aOwner)
+ : OffTheBooksMutex(aName)
+#ifdef DEBUG
+ ,
+ mOwner(aOwner)
+#endif
+ {
+ MOZ_COUNT_CTOR(MutexSingleWriter);
+ MOZ_ASSERT(mOwner);
+ }
+
+ MOZ_COUNTED_DTOR(MutexSingleWriter)
+
+ /**
+ * Statically assert that we're on the only thread that modifies data
+ * guarded by this Mutex. This allows static checking for the pattern of
+ * having a single thread modify a set of data, and read it (under lock)
+ * on other threads, and reads on the thread that modifies it doesn't
+ * require a lock. This doesn't solve the issue of some data under the
+ * Mutex following this pattern, and other data under the mutex being
+ * written from multiple threads.
+ *
+   * We could record the writing thread and dynamically check it in debug
+   * builds, but this class currently doesn't. We could also use the
+   * thread-safety/capability system to provide direct thread assertions.
+ **/
+ void AssertOnWritingThread() const MOZ_ASSERT_CAPABILITY(this) {
+ MOZ_ASSERT(mOwner->OnWritingThread());
+ }
+ void AssertOnWritingThreadOrHeld() const MOZ_ASSERT_CAPABILITY(this) {
+#ifdef DEBUG
+ if (!mOwner->OnWritingThread()) {
+ AssertCurrentThreadOwns();
+ }
+#endif
+ }
+
+ private:
+#ifdef DEBUG
+ SingleWriterLockOwner* mOwner MOZ_UNSAFE_REF(
+ "This is normally the object that contains the MonitorSingleWriter, so "
+ "we don't want to hold a reference to ourselves");
+#endif
+
+ MutexSingleWriter() = delete;
+ MutexSingleWriter(const MutexSingleWriter&) = delete;
+ MutexSingleWriter& operator=(const MutexSingleWriter&) = delete;
+};
+
+namespace detail {
+template <typename T>
+class MOZ_RAII BaseAutoUnlock;
+
+/**
+ * MutexAutoLock
+ * Acquires the Mutex when it enters scope, and releases it when it leaves
+ * scope.
+ *
+ * MUCH PREFERRED to bare calls to Mutex.Lock and Unlock.
+ */
+template <typename T>
+class MOZ_RAII MOZ_SCOPED_CAPABILITY BaseAutoLock {
+ public:
+  /**
+   * Constructor
+   * The constructor acquires the given lock. The destructor
+   * releases the lock.
+   *
+   * @param aLock A reference to a valid mozilla::Mutex.
+   **/
+ explicit BaseAutoLock(T aLock) MOZ_CAPABILITY_ACQUIRE(aLock) : mLock(aLock) {
+ mLock.Lock();
+ }
+
+ ~BaseAutoLock(void) MOZ_CAPABILITY_RELEASE() { mLock.Unlock(); }
+
+ // Assert that aLock is the mutex passed to the constructor and that the
+ // current thread owns the mutex. In coding patterns such as:
+ //
+ // void LockedMethod(const BaseAutoLock<T>& aProofOfLock)
+ // {
+ // aProofOfLock.AssertOwns(mMutex);
+ // ...
+ // }
+ //
+ // Without this assertion, it could be that mMutex is not actually
+ // locked. It's possible to have code like:
+ //
+ // BaseAutoLock lock(someMutex);
+ // ...
+ // BaseAutoUnlock unlock(someMutex);
+ // ...
+ // LockedMethod(lock);
+ //
+ // and in such a case, simply asserting that the mutex pointers match is not
+ // sufficient; mutex ownership must be asserted as well.
+ //
+ // Note that if you are going to use the coding pattern presented above, you
+ // should use this method in preference to using AssertCurrentThreadOwns on
+ // the mutex you expected to be held, since this method provides stronger
+ // guarantees.
+ void AssertOwns(const T& aMutex) const MOZ_ASSERT_CAPABILITY(aMutex) {
+ MOZ_ASSERT(&aMutex == &mLock);
+ mLock.AssertCurrentThreadOwns();
+ }
+
+ private:
+ BaseAutoLock() = delete;
+ BaseAutoLock(BaseAutoLock&) = delete;
+ BaseAutoLock& operator=(BaseAutoLock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ friend class BaseAutoUnlock<T>;
+
+ T mLock;
+};
+
+template <typename MutexType>
+BaseAutoLock(MutexType&) -> BaseAutoLock<MutexType&>;
+} // namespace detail
+
+typedef detail::BaseAutoLock<Mutex&> MutexAutoLock;
+typedef detail::BaseAutoLock<MutexSingleWriter&> MutexSingleWriterAutoLock;
+typedef detail::BaseAutoLock<OffTheBooksMutex&> OffTheBooksMutexAutoLock;
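+
+// An illustrative sketch of the RAII pattern (the Counter class is
+// hypothetical):
+//
+//   class Counter {
+//    public:
+//     Counter() : mMutex("Counter::mMutex") {}
+//     void Increment() {
+//       MutexAutoLock lock(mMutex);  // unlocked automatically at scope exit
+//       ++mValue;
+//     }
+//    private:
+//     Mutex mMutex;
+//     int mValue = 0;
+//   };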
+
+// Use if we've done AssertOnWritingThread(), and then later need to take the
+// lock to write to a protected member. Instead of
+// MutexSingleWriterAutoLock lock(mutex)
+// use
+// MutexSingleWriterAutoLockOnThread(lock, mutex)
+#define MutexSingleWriterAutoLockOnThread(lock, mutex) \
+ MOZ_PUSH_IGNORE_THREAD_SAFETY \
+ MutexSingleWriterAutoLock lock(mutex); \
+ MOZ_POP_THREAD_SAFETY
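+
+// An illustrative sketch of the single-writer pattern; Tracker and its
+// main-thread-writer policy are hypothetical:
+//
+//   class Tracker final : public SingleWriterLockOwner {
+//    public:
+//     Tracker() : mMutex("Tracker::mMutex", this) {}
+//     bool OnWritingThread() const override { return NS_IsMainThread(); }
+//
+//     void WriteFromWritingThread() {
+//       mMutex.AssertOnWritingThread();
+//       // Writes still take the lock so readers on other threads are safe:
+//       MutexSingleWriterAutoLockOnThread(lock, mMutex);
+//       ++mValue;
+//     }
+//     int ReadFromWritingThread() {
+//       mMutex.AssertOnWritingThread();
+//       return mValue;  // no lock needed on the writing thread
+//     }
+//
+//    private:
+//     MutexSingleWriter mMutex;
+//     int mValue = 0;
+//   };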
+
+namespace detail {
+/**
+ * ReleasableMutexAutoLock
+ * Acquires the Mutex when it enters scope, and releases it when it leaves
+ * scope. Allows calling Unlock (and Lock) as an alternative to
+ * MutexAutoUnlock; this can avoid an extra lock/unlock pair.
+ *
+ */
+template <typename T>
+class MOZ_RAII MOZ_SCOPED_CAPABILITY ReleasableBaseAutoLock {
+ public:
+  /**
+   * Constructor
+   * The constructor acquires the given lock. The destructor
+   * releases the lock.
+   *
+   * @param aLock A reference to a valid mozilla::Mutex.
+   **/
+ explicit ReleasableBaseAutoLock(T aLock) MOZ_CAPABILITY_ACQUIRE(aLock)
+ : mLock(aLock) {
+ mLock.Lock();
+ mLocked = true;
+ }
+
+ ~ReleasableBaseAutoLock(void) MOZ_CAPABILITY_RELEASE() {
+ if (mLocked) {
+ Unlock();
+ }
+ }
+
+ void AssertOwns(const T& aMutex) const MOZ_ASSERT_CAPABILITY(aMutex) {
+ MOZ_ASSERT(&aMutex == &mLock);
+ mLock.AssertCurrentThreadOwns();
+ }
+
+ // Allow dropping the lock prematurely; for example to support something like:
+ // clang-format off
+ // MutexAutoLock lock(mMutex);
+ // ...
+ // if (foo) {
+ // lock.Unlock();
+ // MethodThatCantBeCalledWithLock()
+ // return;
+ // }
+ // clang-format on
+ void Unlock() MOZ_CAPABILITY_RELEASE() {
+ MOZ_ASSERT(mLocked);
+ mLock.Unlock();
+ mLocked = false;
+ }
+ void Lock() MOZ_CAPABILITY_ACQUIRE() {
+ MOZ_ASSERT(!mLocked);
+ mLock.Lock();
+ mLocked = true;
+ }
+
+ private:
+ ReleasableBaseAutoLock() = delete;
+ ReleasableBaseAutoLock(ReleasableBaseAutoLock&) = delete;
+ ReleasableBaseAutoLock& operator=(ReleasableBaseAutoLock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ bool mLocked;
+ T mLock;
+};
+
+template <typename MutexType>
+ReleasableBaseAutoLock(MutexType&) -> ReleasableBaseAutoLock<MutexType&>;
+} // namespace detail
+
+typedef detail::ReleasableBaseAutoLock<Mutex&> ReleasableMutexAutoLock;
+
+namespace detail {
+/**
+ * BaseAutoUnlock
+ * Releases the Mutex when it enters scope, and re-acquires it when it leaves
+ * scope.
+ *
+ * MUCH PREFERRED to bare calls to Mutex.Unlock and Lock.
+ */
+template <typename T>
+class MOZ_RAII MOZ_SCOPED_CAPABILITY BaseAutoUnlock {
+ public:
+ explicit BaseAutoUnlock(T aLock) MOZ_SCOPED_UNLOCK_RELEASE(aLock)
+ : mLock(aLock) {
+ mLock.Unlock();
+ }
+
+  explicit BaseAutoUnlock(BaseAutoLock<T>& aAutoLock)
+      /* MOZ_CAPABILITY_RELEASE(aAutoLock.mLock) */
+      : mLock(aAutoLock.mLock) {
+    // mLock is a reference, so it cannot be null; unlock it directly.
+    mLock.Unlock();
+  }
+
+ ~BaseAutoUnlock() MOZ_SCOPED_UNLOCK_REACQUIRE() { mLock.Lock(); }
+
+ private:
+ BaseAutoUnlock() = delete;
+ BaseAutoUnlock(BaseAutoUnlock&) = delete;
+ BaseAutoUnlock& operator=(BaseAutoUnlock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ T mLock;
+};
+
+template <typename MutexType>
+BaseAutoUnlock(MutexType&) -> BaseAutoUnlock<MutexType&>;
+} // namespace detail
+
+typedef detail::BaseAutoUnlock<Mutex&> MutexAutoUnlock;
+typedef detail::BaseAutoUnlock<MutexSingleWriter&> MutexSingleWriterAutoUnlock;
+typedef detail::BaseAutoUnlock<OffTheBooksMutex&> OffTheBooksMutexAutoUnlock;
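+
+// An illustrative sketch (mMutex and the callback are hypothetical):
+//
+//   void Process() {
+//     MutexAutoLock lock(mMutex);
+//     // ... work that needs the lock ...
+//     {
+//       MutexAutoUnlock unlock(mMutex);  // lock released here...
+//       CallbackThatMustNotHoldLock();
+//     }                                  // ...and re-acquired here
+//     // ... more work that needs the lock ...
+//   }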
+
+namespace detail {
+/**
+ * BaseAutoTryLock
+ * Tries to acquire the Mutex when it enters scope, and releases it when it
+ * leaves scope.
+ *
+ * MUCH PREFERRED to bare calls to Mutex.TryLock and Unlock.
+ */
+template <typename T>
+class MOZ_RAII MOZ_SCOPED_CAPABILITY BaseAutoTryLock {
+ public:
+ explicit BaseAutoTryLock(T& aLock) MOZ_CAPABILITY_ACQUIRE(aLock)
+ : mLock(aLock.TryLock() ? &aLock : nullptr) {}
+
+ ~BaseAutoTryLock() MOZ_CAPABILITY_RELEASE() {
+ if (mLock) {
+ mLock->Unlock();
+ mLock = nullptr;
+ }
+ }
+
+ explicit operator bool() const { return mLock; }
+
+ private:
+ BaseAutoTryLock(BaseAutoTryLock&) = delete;
+ BaseAutoTryLock& operator=(BaseAutoTryLock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ T* mLock;
+};
+} // namespace detail
+
+typedef detail::BaseAutoTryLock<Mutex> MutexAutoTryLock;
+typedef detail::BaseAutoTryLock<OffTheBooksMutex> OffTheBooksMutexAutoTryLock;
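+
+// An illustrative sketch (mMutex is a hypothetical member):
+//
+//   MutexAutoTryLock lock(mMutex);
+//   if (lock) {
+//     // Acquired; released automatically when `lock` goes out of scope.
+//   } else {
+//     // Contended; skip the work or retry later.
+//   }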
+
+} // namespace mozilla
+
+#endif // ifndef mozilla_Mutex_h
diff --git a/xpcom/threads/PerformanceCounter.cpp b/xpcom/threads/PerformanceCounter.cpp
new file mode 100644
index 0000000000..65ee441809
--- /dev/null
+++ b/xpcom/threads/PerformanceCounter.cpp
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Logging.h"
+#include "mozilla/PerformanceCounter.h"
+
+using mozilla::DispatchCategory;
+using mozilla::DispatchCounter;
+using mozilla::PerformanceCounter;
+
+static mozilla::LazyLogModule sPerformanceCounter("PerformanceCounter");
+#ifdef LOG
+# undef LOG
+#endif
+#define LOG(args) MOZ_LOG(sPerformanceCounter, mozilla::LogLevel::Debug, args)
+
+// Global counter used by PerformanceCounter CTOR via NextCounterID().
+static mozilla::Atomic<uint64_t> gNextCounterID(0);
+
+static uint64_t NextCounterID() {
+ // This can return the same value on different processes but
+ // we're fine with this behavior because consumers can use a (pid, counter_id)
+ // tuple to make instances globally unique in a browser session.
+ return ++gNextCounterID;
+}
+
+// This instance is the extension used for worker dispatches.
+const DispatchCategory DispatchCategory::Worker =
+ DispatchCategory((uint32_t)TaskCategory::Count);
+
+PerformanceCounter::PerformanceCounter(const nsACString& aName)
+ : mExecutionDuration(0),
+ mTotalDispatchCount(0),
+ mDispatchCounter(),
+ mName(aName),
+ mID(NextCounterID()) {
+ LOG(("PerformanceCounter created with ID %" PRIu64, mID));
+}
+
+void PerformanceCounter::IncrementDispatchCounter(DispatchCategory aCategory) {
+ mDispatchCounter[aCategory.GetValue()] += 1;
+ mTotalDispatchCount += 1;
+ LOG(("[%s][%" PRIu64 "] Total dispatch %" PRIu64, mName.get(), GetID(),
+ uint64_t(mTotalDispatchCount)));
+}
+
+void PerformanceCounter::IncrementExecutionDuration(uint32_t aMicroseconds) {
+ mExecutionDuration += aMicroseconds;
+ LOG(("[%s][%" PRIu64 "] Total duration %" PRIu64, mName.get(), GetID(),
+ uint64_t(mExecutionDuration)));
+}
+
+const DispatchCounter& PerformanceCounter::GetDispatchCounter() const {
+ return mDispatchCounter;
+}
+
+uint64_t PerformanceCounter::GetExecutionDuration() const {
+ return mExecutionDuration;
+}
+
+uint64_t PerformanceCounter::GetTotalDispatchCount() const {
+ return mTotalDispatchCount;
+}
+
+uint32_t PerformanceCounter::GetDispatchCount(
+ DispatchCategory aCategory) const {
+ return mDispatchCounter[aCategory.GetValue()];
+}
+
+uint64_t PerformanceCounter::GetID() const { return mID; }
diff --git a/xpcom/threads/PerformanceCounter.h b/xpcom/threads/PerformanceCounter.h
new file mode 100644
index 0000000000..4181320e10
--- /dev/null
+++ b/xpcom/threads/PerformanceCounter.h
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_PerformanceCounter_h
+#define mozilla_PerformanceCounter_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/TaskCategory.h"
+#include "nsISupportsImpl.h"
+#include "nsString.h"
+
+namespace mozilla {
+
+/*
+ * The DispatchCategory class is used to fake the inheritance
+ * of the TaskCategory enum so we can extend it to hold
+ * one more value corresponding to the category
+ * we use when a worker dispatches a call.
+ *
+ */
+class DispatchCategory final {
+ public:
+ explicit DispatchCategory(uint32_t aValue) : mValue(aValue) {
+ // Since DispatchCategory is adding one single value to the
+ // TaskCategory enum, we can check here that the value is
+ // the next index e.g. TaskCategory::Count
+ MOZ_ASSERT(aValue == (uint32_t)TaskCategory::Count);
+ }
+
+ constexpr explicit DispatchCategory(TaskCategory aValue)
+ : mValue((uint32_t)aValue) {}
+
+ uint32_t GetValue() const { return mValue; }
+
+ static const DispatchCategory Worker;
+
+ private:
+ uint32_t mValue;
+};
+
+typedef Array<Atomic<uint32_t>, (uint32_t)TaskCategory::Count + 1>
+ DispatchCounter;
+
+// PerformanceCounter is a class that can be used to keep track of
+// runnable execution times and dispatch counts.
+//
+// - runnable execution time: time spent in a runnable when called
+// in nsThread::ProcessNextEvent (not counting recursive calls)
+// - dispatch counts: number of times a tracked runnable is dispatched
+// in nsThread. Useful to measure the activity of a tab or worker.
+//
+// The PerformanceCounter class is currently instantiated in DocGroup
+// and WorkerPrivate in order to count how many scheduler dispatches
+// are done through them, and how long the execution lasts.
+//
+// The execution time is calculated by the nsThread class (and its
+// inherited WorkerThread class) in its ProcessNextEvent method.
+//
+// For each processed runnable, nsThread will reach out to the
+// PerformanceCounter attached to the runnable via its DocGroup
+// or WorkerPrivate and call IncrementExecutionDuration().
+//
+// Note that the execution duration counting accounts for recursion:
+// if an event triggers a recursive call to
+// nsThread::ProcessNextEvent, the counter discards the time
+// spent in sub-events.
+class PerformanceCounter final {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PerformanceCounter)
+
+ explicit PerformanceCounter(const nsACString& aName);
+
+ /**
+   * This is called every time a runnable is dispatched.
+ *
+ * aCategory can be used to distinguish counts per TaskCategory
+ *
+ * Note that an overflow will simply reset the counter.
+ */
+ void IncrementDispatchCounter(DispatchCategory aCategory);
+
+ /**
+ * This is called via nsThread::ProcessNextEvent to measure runnable
+ * execution duration.
+ *
+ * Note that an overflow will simply reset the counter.
+ */
+ void IncrementExecutionDuration(uint32_t aMicroseconds);
+
+ /**
+ * Returns a category/counter array of all dispatches.
+ */
+ const DispatchCounter& GetDispatchCounter() const;
+
+ /**
+ * Returns the total execution duration.
+ */
+ uint64_t GetExecutionDuration() const;
+
+ /**
+ * Returns the number of dispatches per TaskCategory.
+ */
+ uint32_t GetDispatchCount(DispatchCategory aCategory) const;
+
+ /**
+ * Returns the total number of dispatches.
+ */
+ uint64_t GetTotalDispatchCount() const;
+
+ /**
+ * Returns the unique id for the instance.
+ *
+ * Used to distinguish instances since the lifespan of
+ * a PerformanceCounter can be shorter than the
+ * host it's tracking. That leads to edge cases
+ * where a counter appears to have values that go
+   * backwards. Having this id lets consumers
+ * detect that they are dealing with a new counter
+ * when it happens.
+ */
+ uint64_t GetID() const;
+
+ private:
+ ~PerformanceCounter() = default;
+
+ Atomic<uint64_t> mExecutionDuration;
+ Atomic<uint64_t> mTotalDispatchCount;
+ DispatchCounter mDispatchCounter;
+ nsCString mName;
+ const uint64_t mID;
+};
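+
+// An illustrative sketch of typical use (the counter name is arbitrary):
+//
+//   RefPtr<PerformanceCounter> counter =
+//       new PerformanceCounter("example"_ns);
+//   counter->IncrementDispatchCounter(DispatchCategory::Worker);
+//   counter->IncrementExecutionDuration(250);  // microseconds
+//   uint64_t dispatches = counter->GetTotalDispatchCount();  // == 1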
+
+} // namespace mozilla
+
+#endif // mozilla_PerformanceCounter_h
diff --git a/xpcom/threads/Queue.h b/xpcom/threads/Queue.h
new file mode 100644
index 0000000000..fa36433fdf
--- /dev/null
+++ b/xpcom/threads/Queue.h
@@ -0,0 +1,265 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_Queue_h
+#define mozilla_Queue_h
+
+#include <utility>
+#include <stdint.h>
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Assertions.h"
+#include "mozalloc.h"
+
+namespace mozilla {
+
+// define to turn on additional (DEBUG) asserts
+// #define EXTRA_ASSERTS 1
+
+// A queue implements a singly linked list of pages, each of which contains some
+// number of elements. Since the queue needs to store a "next" pointer, the
+// actual number of elements per page won't be quite as many as were requested.
+//
+// Each page consists of N entries. We use the head buffer as a circular buffer
+// if it's the only buffer; if we have more than one buffer when the head is
+// empty we release it. This avoids repeatedly freeing and reallocating
+// buffers every N entries. We'll still allocate and free every N if the
+// normal queue depth is greater than N. A fancier solution would be to move
+// an empty head buffer to be an empty tail buffer, freeing if we have
+// multiple empty tails, but that probably isn't worth it.
+//
+// Cases:
+// a) single buffer, circular
+// Push: if not full:
+// Add to tail, bump tail and reset to 0 if at end
+// full:
+// Add new page, insert there and set tail to 1
+// Pop:
+// take entry and bump head, reset to 0 if at end
+// b) multiple buffers:
+// Push: if not full:
+// Add to tail, bump tail
+// full:
+// Add new page, insert there and set tail to 1
+// Pop:
+// take entry and bump head, reset to 0 if at end
+// if buffer is empty, free head buffer and promote next to head
+//
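+// An illustrative usage sketch (the page size 8 is arbitrary):
+//
+//   Queue<int, 8> q;      // each page holds 7 items; one slot stores mNext
+//   q.Push(1);
+//   q.Push(2);
+//   MOZ_ASSERT(q.Count() == 2);
+//   int first = q.Pop();  // first == 1: FIFO order
+//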
+template <class T, size_t RequestedItemsPerPage = 256>
+class Queue {
+ public:
+ Queue() = default;
+
+ Queue(Queue&& aOther) noexcept
+ : mHead(std::exchange(aOther.mHead, nullptr)),
+ mTail(std::exchange(aOther.mTail, nullptr)),
+ mOffsetHead(std::exchange(aOther.mOffsetHead, 0)),
+ mHeadLength(std::exchange(aOther.mHeadLength, 0)),
+ mTailLength(std::exchange(aOther.mTailLength, 0)) {}
+
+ Queue& operator=(Queue&& aOther) noexcept {
+ Clear();
+
+ mHead = std::exchange(aOther.mHead, nullptr);
+ mTail = std::exchange(aOther.mTail, nullptr);
+ mOffsetHead = std::exchange(aOther.mOffsetHead, 0);
+ mHeadLength = std::exchange(aOther.mHeadLength, 0);
+ mTailLength = std::exchange(aOther.mTailLength, 0);
+ return *this;
+ }
+
+ ~Queue() { Clear(); }
+
+  // Discard all elements from the queue, leaving it empty.
+ void Clear() {
+ while (!IsEmpty()) {
+ Pop();
+ }
+ if (mHead) {
+ free(mHead);
+ mHead = nullptr;
+ }
+ }
+
+ T& Push(T&& aElement) {
+#if defined(EXTRA_ASSERTS) && DEBUG
+ size_t original_length = Count();
+#endif
+ if (!mHead) {
+ mHead = NewPage();
+ MOZ_ASSERT(mHead);
+
+ mTail = mHead;
+ T* eltPtr = &mTail->mEvents[0];
+ new (eltPtr) T(std::move(aElement));
+ mOffsetHead = 0;
+ mHeadLength = 1;
+#ifdef EXTRA_ASSERTS
+ MOZ_ASSERT(Count() == original_length + 1);
+#endif
+ return *eltPtr;
+ }
+ if ((mHead == mTail && mHeadLength == ItemsPerPage) ||
+ (mHead != mTail && mTailLength == ItemsPerPage)) {
+ // either we have one (circular) buffer and it's full, or
+ // we have multiple buffers and the last buffer is full
+ Page* page = NewPage();
+ MOZ_ASSERT(page);
+
+ mTail->mNext = page;
+ mTail = page;
+ T* eltPtr = &page->mEvents[0];
+ new (eltPtr) T(std::move(aElement));
+ mTailLength = 1;
+#ifdef EXTRA_ASSERTS
+ MOZ_ASSERT(Count() == original_length + 1);
+#endif
+ return *eltPtr;
+ }
+ if (mHead == mTail) {
+ // we have space in the (single) head buffer
+ uint16_t offset = (mOffsetHead + mHeadLength++) % ItemsPerPage;
+ T* eltPtr = &mTail->mEvents[offset];
+ new (eltPtr) T(std::move(aElement));
+#ifdef EXTRA_ASSERTS
+ MOZ_ASSERT(Count() == original_length + 1);
+#endif
+ return *eltPtr;
+ }
+ // else we have space to insert into last buffer
+ T* eltPtr = &mTail->mEvents[mTailLength++];
+ new (eltPtr) T(std::move(aElement));
+#ifdef EXTRA_ASSERTS
+ MOZ_ASSERT(Count() == original_length + 1);
+#endif
+ return *eltPtr;
+ }
+
+ bool IsEmpty() const {
+ return !mHead || (mHead == mTail && mHeadLength == 0);
+ }
+
+ T Pop() {
+#if defined(EXTRA_ASSERTS) && DEBUG
+ size_t original_length = Count();
+#endif
+ MOZ_ASSERT(!IsEmpty());
+
+ T result = std::move(mHead->mEvents[mOffsetHead]);
+ mHead->mEvents[mOffsetHead].~T();
+ mOffsetHead = (mOffsetHead + 1) % ItemsPerPage;
+ mHeadLength -= 1;
+
+ // Check if mHead points to empty (circular) Page and we have more
+ // pages
+ if (mHead != mTail && mHeadLength == 0) {
+ Page* dead = mHead;
+ mHead = mHead->mNext;
+ free(dead);
+ mOffsetHead = 0;
+ // if there are still >1 pages, the new head is full.
+ if (mHead != mTail) {
+ mHeadLength = ItemsPerPage;
+ } else {
+ mHeadLength = mTailLength;
+ mTailLength = 0;
+ }
+ }
+
+#ifdef EXTRA_ASSERTS
+ MOZ_ASSERT(Count() == original_length - 1);
+#endif
+ return result;
+ }
+
+ T& FirstElement() {
+ MOZ_ASSERT(!IsEmpty());
+ return mHead->mEvents[mOffsetHead];
+ }
+
+ const T& FirstElement() const {
+ MOZ_ASSERT(!IsEmpty());
+ return mHead->mEvents[mOffsetHead];
+ }
+
+  T& LastElement() {
+    MOZ_ASSERT(!IsEmpty());
+    // In single-page (circular) mode the last element may wrap around the end
+    // of the page, so reduce the offset modulo ItemsPerPage.
+    uint16_t offset = mHead == mTail
+                          ? (mOffsetHead + mHeadLength - 1) % ItemsPerPage
+                          : mTailLength - 1;
+    return mTail->mEvents[offset];
+  }
+
+  const T& LastElement() const {
+    MOZ_ASSERT(!IsEmpty());
+    uint16_t offset = mHead == mTail
+                          ? (mOffsetHead + mHeadLength - 1) % ItemsPerPage
+                          : mTailLength - 1;
+    return mTail->mEvents[offset];
+  }
+
+ size_t Count() const {
+    // The count is trivially 0 when the queue is empty.
+ if (!mHead) {
+ return 0;
+ }
+
+    // Count full (intermediate) pages; doesn't count the first or last page.
+ int count = 0;
+ // 1 buffer will have mHead == mTail; 2 will have mHead->mNext == mTail
+ for (Page* page = mHead; page != mTail && page->mNext != mTail;
+ page = page->mNext) {
+ count += ItemsPerPage;
+ }
+ // add first and last page
+ count += mHeadLength + mTailLength;
+ MOZ_ASSERT(count >= 0);
+
+ return count;
+ }
+
+ size_t ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+ size_t n = 0;
+ if (mHead) {
+ for (Page* page = mHead; page != mTail; page = page->mNext) {
+ n += aMallocSizeOf(page);
+ }
+ }
+ return n;
+ }
+
+ size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ private:
+ static_assert(
+ (RequestedItemsPerPage & (RequestedItemsPerPage - 1)) == 0,
+ "RequestedItemsPerPage should be a power of two to avoid heap slop.");
+
+ // Since a Page must also contain a "next" pointer, we use one of the items to
+ // store this pointer. If sizeof(T) > sizeof(Page*), then some space will be
+ // wasted. So be it.
+ static const size_t ItemsPerPage = RequestedItemsPerPage - 1;
+
+ // Page objects are linked together to form a simple deque.
+ struct Page {
+ struct Page* mNext;
+ T mEvents[ItemsPerPage];
+ };
+
+ static Page* NewPage() {
+ return static_cast<Page*>(moz_xcalloc(1, sizeof(Page)));
+ }
+
+ Page* mHead = nullptr;
+ Page* mTail = nullptr;
+
+ uint16_t mOffsetHead = 0; // Read position in head page
+ uint16_t mHeadLength = 0; // Number of items in the head page
+ uint16_t mTailLength = 0; // Number of items in the tail page
+};
+
+} // namespace mozilla
+
+#endif // mozilla_Queue_h
diff --git a/xpcom/threads/RWLock.cpp b/xpcom/threads/RWLock.cpp
new file mode 100644
index 0000000000..949934c8cc
--- /dev/null
+++ b/xpcom/threads/RWLock.cpp
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RWLock.h"
+
+namespace mozilla {
+
+RWLock::RWLock(const char* aName)
+ : BlockingResourceBase(aName, eMutex)
+#ifdef DEBUG
+ ,
+ mOwningThread(nullptr)
+#endif
+{
+}
+
+#ifdef DEBUG
+bool RWLock::LockedForWritingByCurrentThread() {
+ return mOwningThread == PR_GetCurrentThread();
+}
+#endif
+
+} // namespace mozilla
+
+#undef NativeHandle
diff --git a/xpcom/threads/RWLock.h b/xpcom/threads/RWLock.h
new file mode 100644
index 0000000000..e03d008631
--- /dev/null
+++ b/xpcom/threads/RWLock.h
@@ -0,0 +1,243 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// An interface for read-write locks.
+
+#ifndef mozilla_RWLock_h
+#define mozilla_RWLock_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/BlockingResourceBase.h"
+#include "mozilla/PlatformRWLock.h"
+#include "mozilla/ThreadSafety.h"
+
+namespace mozilla {
+
+// A RWLock is similar to a Mutex, but whereas a Mutex permits only a single
+// reader thread or a single writer thread to access a piece of data, a
+// RWLock distinguishes between readers and writers: you may have multiple
+// reader threads concurrently accessing a piece of data or a single writer
+// thread. This difference should guide your usage of RWLock: if you are not
+// reading the data from multiple threads simultaneously, or you are writing
+// to the data roughly as often as you read from it, then Mutex will suit
+// your purposes just fine.
+//
+// You should be using the AutoReadLock and AutoWriteLock classes, below,
+// for RAII read locking and write locking, respectively. If you really must
+// take a read lock manually, call the ReadLock method; to relinquish that
+// read lock, call the ReadUnlock method. Similarly, WriteLock and WriteUnlock
+// perform the same operations, but for write locks.
+//
+// It is unspecified what happens when a given thread attempts to acquire the
+// same lock in multiple ways; some underlying implementations of RWLock do
+// support acquiring a read lock multiple times on a given thread, but you
+// should not rely on this behavior.
+//
+// It is unspecified whether RWLock gives priority to waiting readers or
+// a waiting writer when unlocking.
+class MOZ_CAPABILITY("rwlock") RWLock : public detail::RWLockImpl,
+ public BlockingResourceBase {
+ public:
+ explicit RWLock(const char* aName);
+
+#ifdef DEBUG
+ bool LockedForWritingByCurrentThread();
+ [[nodiscard]] bool TryReadLock() MOZ_SHARED_TRYLOCK_FUNCTION(true);
+ void ReadLock() MOZ_ACQUIRE_SHARED();
+ void ReadUnlock() MOZ_RELEASE_SHARED();
+ [[nodiscard]] bool TryWriteLock() MOZ_TRY_ACQUIRE(true);
+ void WriteLock() MOZ_CAPABILITY_ACQUIRE();
+ void WriteUnlock() MOZ_EXCLUSIVE_RELEASE();
+#else
+ [[nodiscard]] bool TryReadLock() MOZ_SHARED_TRYLOCK_FUNCTION(true) {
+ return detail::RWLockImpl::tryReadLock();
+ }
+ void ReadLock() MOZ_ACQUIRE_SHARED() { detail::RWLockImpl::readLock(); }
+ void ReadUnlock() MOZ_RELEASE_SHARED() { detail::RWLockImpl::readUnlock(); }
+ [[nodiscard]] bool TryWriteLock() MOZ_TRY_ACQUIRE(true) {
+ return detail::RWLockImpl::tryWriteLock();
+ }
+ void WriteLock() MOZ_CAPABILITY_ACQUIRE() { detail::RWLockImpl::writeLock(); }
+ void WriteUnlock() MOZ_EXCLUSIVE_RELEASE() {
+ detail::RWLockImpl::writeUnlock();
+ }
+#endif
+
+ private:
+ RWLock() = delete;
+ RWLock(const RWLock&) = delete;
+ RWLock& operator=(const RWLock&) = delete;
+
+#ifdef DEBUG
+ // We record the owning thread for write locks only.
+ PRThread* mOwningThread;
+#endif
+};
+
+// We only use this once; it's unclear whether we can add thread-safety
+// annotations here.
+template <typename T>
+class MOZ_RAII BaseAutoTryReadLock {
+ public:
+ explicit BaseAutoTryReadLock(T& aLock)
+ : mLock(aLock.TryReadLock() ? &aLock : nullptr) {}
+
+ ~BaseAutoTryReadLock() {
+ if (mLock) {
+ mLock->ReadUnlock();
+ }
+ }
+
+ explicit operator bool() const { return mLock; }
+
+ private:
+ BaseAutoTryReadLock() = delete;
+ BaseAutoTryReadLock(const BaseAutoTryReadLock&) = delete;
+ BaseAutoTryReadLock& operator=(const BaseAutoTryReadLock&) = delete;
+
+ T* mLock;
+};
+
+template <typename T>
+class MOZ_SCOPED_CAPABILITY MOZ_RAII BaseAutoReadLock {
+ public:
+ explicit BaseAutoReadLock(T& aLock) MOZ_ACQUIRE_SHARED(aLock)
+ : mLock(&aLock) {
+ MOZ_ASSERT(mLock, "null lock");
+ mLock->ReadLock();
+ }
+
+ // Not MOZ_RELEASE_SHARED(), which would make sense - apparently this trips
+ // over a bug in clang's static analyzer and it says it expected an
+ // exclusive unlock.
+ ~BaseAutoReadLock() MOZ_RELEASE_GENERIC() { mLock->ReadUnlock(); }
+
+ private:
+ BaseAutoReadLock() = delete;
+ BaseAutoReadLock(const BaseAutoReadLock&) = delete;
+ BaseAutoReadLock& operator=(const BaseAutoReadLock&) = delete;
+
+ T* mLock;
+};
+
+// XXX Mutex attributions?
+template <typename T>
+class MOZ_RAII BaseAutoTryWriteLock {
+ public:
+ explicit BaseAutoTryWriteLock(T& aLock)
+ : mLock(aLock.TryWriteLock() ? &aLock : nullptr) {}
+
+ ~BaseAutoTryWriteLock() {
+ if (mLock) {
+ mLock->WriteUnlock();
+ }
+ }
+
+ explicit operator bool() const { return mLock; }
+
+ private:
+ BaseAutoTryWriteLock() = delete;
+ BaseAutoTryWriteLock(const BaseAutoTryWriteLock&) = delete;
+ BaseAutoTryWriteLock& operator=(const BaseAutoTryWriteLock&) = delete;
+
+ T* mLock;
+};
+
+template <typename T>
+class MOZ_SCOPED_CAPABILITY MOZ_RAII BaseAutoWriteLock final {
+ public:
+ explicit BaseAutoWriteLock(T& aLock) MOZ_CAPABILITY_ACQUIRE(aLock)
+ : mLock(&aLock) {
+ MOZ_ASSERT(mLock, "null lock");
+ mLock->WriteLock();
+ }
+
+ ~BaseAutoWriteLock() MOZ_CAPABILITY_RELEASE() { mLock->WriteUnlock(); }
+
+ private:
+ BaseAutoWriteLock() = delete;
+ BaseAutoWriteLock(const BaseAutoWriteLock&) = delete;
+ BaseAutoWriteLock& operator=(const BaseAutoWriteLock&) = delete;
+
+ T* mLock;
+};
+
+// Read try-lock and unlock a RWLock with RAII semantics. Much preferred to
+// bare calls to TryReadLock() and ReadUnlock().
+typedef BaseAutoTryReadLock<RWLock> AutoTryReadLock;
+
+// Read lock and unlock a RWLock with RAII semantics. Much preferred to bare
+// calls to ReadLock() and ReadUnlock().
+typedef BaseAutoReadLock<RWLock> AutoReadLock;
+
+// Write try-lock and unlock a RWLock with RAII semantics. Much preferred to
+// bare calls to TryWriteLock() and WriteUnlock().
+typedef BaseAutoTryWriteLock<RWLock> AutoTryWriteLock;
+
+// Write lock and unlock a RWLock with RAII semantics. Much preferred to bare
+// calls to WriteLock() and WriteUnlock().
+typedef BaseAutoWriteLock<RWLock> AutoWriteLock;
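+
+// An illustrative sketch (Cache, mTable, and the accessors are hypothetical):
+//
+//   class Cache {
+//    public:
+//     Cache() : mLock("Cache::mLock") {}
+//     int Lookup(int aKey) {
+//       AutoReadLock lock(mLock);  // many readers may hold this concurrently
+//       return mTable[aKey];
+//     }
+//     void Insert(int aKey, int aValue) {
+//       AutoWriteLock lock(mLock);  // writers are exclusive
+//       mTable[aKey] = aValue;
+//     }
+//    private:
+//     RWLock mLock;
+//     std::map<int, int> mTable;
+//   };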
+
+class MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS MOZ_CAPABILITY("rwlock")
+ StaticRWLock {
+ public:
+  // In debug builds, check that mLock is zero-initialized for us by the
+  // compiler, as we expect. In non-debug builds, don't declare a constructor
+  // so that the compiler can see that the constructor is trivial.
+#ifdef DEBUG
+ StaticRWLock() { MOZ_ASSERT(!mLock); }
+#endif
+
+ [[nodiscard]] bool TryReadLock() MOZ_SHARED_TRYLOCK_FUNCTION(true) {
+ return Lock()->TryReadLock();
+ }
+ void ReadLock() MOZ_ACQUIRE_SHARED() { Lock()->ReadLock(); }
+ void ReadUnlock() MOZ_RELEASE_SHARED() { Lock()->ReadUnlock(); }
+ [[nodiscard]] bool TryWriteLock() MOZ_TRY_ACQUIRE(true) {
+ return Lock()->TryWriteLock();
+ }
+ void WriteLock() MOZ_CAPABILITY_ACQUIRE() { Lock()->WriteLock(); }
+ void WriteUnlock() MOZ_EXCLUSIVE_RELEASE() { Lock()->WriteUnlock(); }
+
+ private:
+ [[nodiscard]] RWLock* Lock() MOZ_RETURN_CAPABILITY(*mLock) {
+ if (mLock) {
+ return mLock;
+ }
+
+ RWLock* lock = new RWLock("StaticRWLock");
+ if (!mLock.compareExchange(nullptr, lock)) {
+ delete lock;
+ }
+
+ return mLock;
+ }
+
+ Atomic<RWLock*> mLock;
+
+ // Disallow copy constructor, but only in debug mode. We only define
+ // a default constructor in debug mode (see above); if we declared
+ // this constructor always, the compiler wouldn't generate a trivial
+ // default constructor for us in non-debug mode.
+#ifdef DEBUG
+ StaticRWLock(const StaticRWLock& aOther);
+#endif
+
+ // Disallow these operators.
+ StaticRWLock& operator=(StaticRWLock* aRhs) = delete;
+ static void* operator new(size_t) noexcept(true) = delete;
+ static void operator delete(void*) = delete;
+};
+
+typedef BaseAutoTryReadLock<StaticRWLock> StaticAutoTryReadLock;
+typedef BaseAutoReadLock<StaticRWLock> StaticAutoReadLock;
+typedef BaseAutoTryWriteLock<StaticRWLock> StaticAutoTryWriteLock;
+typedef BaseAutoWriteLock<StaticRWLock> StaticAutoWriteLock;
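+
+// An illustrative sketch (sConfigLock is hypothetical): a global lock with
+// no static initializer or destructor.
+//
+//   static StaticRWLock sConfigLock;
+//
+//   void ReadConfig() {
+//     StaticAutoReadLock lock(sConfigLock);
+//     // ... read shared configuration ...
+//   }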
+
+} // namespace mozilla
+
+#endif // mozilla_RWLock_h
diff --git a/xpcom/threads/RecursiveMutex.cpp b/xpcom/threads/RecursiveMutex.cpp
new file mode 100644
index 0000000000..7c45052c07
--- /dev/null
+++ b/xpcom/threads/RecursiveMutex.cpp
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/RecursiveMutex.h"
+
+#ifdef XP_WIN
+# include <windows.h>
+
+# define NativeHandle(m) (reinterpret_cast<CRITICAL_SECTION*>(&m))
+#endif
+
+namespace mozilla {
+
+RecursiveMutex::RecursiveMutex(const char* aName)
+ : BlockingResourceBase(aName, eRecursiveMutex)
+#ifdef DEBUG
+ ,
+ mOwningThread(nullptr),
+ mEntryCount(0)
+#endif
+{
+#ifdef XP_WIN
+ // This number was adapted from NSPR.
+ static const DWORD sLockSpinCount = 100;
+
+# if defined(RELEASE_OR_BETA)
+ // Vista and later automatically allocate and subsequently leak a debug info
+ // object for each critical section that we allocate unless we tell the
+ // system not to do that.
+ DWORD flags = CRITICAL_SECTION_NO_DEBUG_INFO;
+# else
+ DWORD flags = 0;
+# endif
+ BOOL r =
+ InitializeCriticalSectionEx(NativeHandle(mMutex), sLockSpinCount, flags);
+ MOZ_RELEASE_ASSERT(r);
+#else
+ pthread_mutexattr_t attr;
+
+ MOZ_RELEASE_ASSERT(pthread_mutexattr_init(&attr) == 0,
+ "pthread_mutexattr_init failed");
+
+ MOZ_RELEASE_ASSERT(
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0,
+ "pthread_mutexattr_settype failed");
+
+ MOZ_RELEASE_ASSERT(pthread_mutex_init(&mMutex, &attr) == 0,
+ "pthread_mutex_init failed");
+
+ MOZ_RELEASE_ASSERT(pthread_mutexattr_destroy(&attr) == 0,
+ "pthread_mutexattr_destroy failed");
+#endif
+}
+
+RecursiveMutex::~RecursiveMutex() {
+#ifdef XP_WIN
+ DeleteCriticalSection(NativeHandle(mMutex));
+#else
+ MOZ_RELEASE_ASSERT(pthread_mutex_destroy(&mMutex) == 0,
+ "pthread_mutex_destroy failed");
+#endif
+}
+
+void RecursiveMutex::LockInternal() {
+#ifdef XP_WIN
+ EnterCriticalSection(NativeHandle(mMutex));
+#else
+ MOZ_RELEASE_ASSERT(pthread_mutex_lock(&mMutex) == 0,
+ "pthread_mutex_lock failed");
+#endif
+}
+
+void RecursiveMutex::UnlockInternal() {
+#ifdef XP_WIN
+ LeaveCriticalSection(NativeHandle(mMutex));
+#else
+ MOZ_RELEASE_ASSERT(pthread_mutex_unlock(&mMutex) == 0,
+ "pthread_mutex_unlock failed");
+#endif
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/RecursiveMutex.h b/xpcom/threads/RecursiveMutex.h
new file mode 100644
index 0000000000..dde21c9a35
--- /dev/null
+++ b/xpcom/threads/RecursiveMutex.h
@@ -0,0 +1,120 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// A lock that can be acquired multiple times on the same thread.
+
+#ifndef mozilla_RecursiveMutex_h
+#define mozilla_RecursiveMutex_h
+
+#include "mozilla/ThreadSafety.h"
+#include "mozilla/BlockingResourceBase.h"
+
+#ifndef XP_WIN
+# include <pthread.h>
+#endif
+
+namespace mozilla {
+
+class MOZ_CAPABILITY("recursive mutex") RecursiveMutex
+ : public BlockingResourceBase {
+ public:
+ explicit RecursiveMutex(const char* aName);
+ ~RecursiveMutex();
+
+#ifdef DEBUG
+ void Lock() MOZ_CAPABILITY_ACQUIRE();
+ void Unlock() MOZ_CAPABILITY_RELEASE();
+#else
+ void Lock() MOZ_CAPABILITY_ACQUIRE() { LockInternal(); }
+ void Unlock() MOZ_CAPABILITY_RELEASE() { UnlockInternal(); }
+#endif
+
+#ifdef DEBUG
+ /**
+ * AssertCurrentThreadIn
+ **/
+ void AssertCurrentThreadIn() const MOZ_ASSERT_CAPABILITY(this);
+ /**
+ * AssertNotCurrentThreadIn
+ **/
+ void AssertNotCurrentThreadIn() const MOZ_EXCLUDES(this) {
+ // Not currently implemented. See bug 476536 for discussion.
+ }
+#else
+ void AssertCurrentThreadIn() const MOZ_ASSERT_CAPABILITY(this) {}
+ void AssertNotCurrentThreadIn() const MOZ_EXCLUDES(this) {}
+#endif
+
+ private:
+ RecursiveMutex() = delete;
+ RecursiveMutex(const RecursiveMutex&) = delete;
+ RecursiveMutex& operator=(const RecursiveMutex&) = delete;
+
+ void LockInternal();
+ void UnlockInternal();
+
+#ifdef DEBUG
+ PRThread* mOwningThread;
+ size_t mEntryCount;
+#endif
+
+#if !defined(XP_WIN)
+ pthread_mutex_t mMutex;
+#else
+ // We eschew including windows.h and using CRITICAL_SECTION here so that files
+ // including us don't also pull in windows.h. Just use a type that's big
+ // enough for CRITICAL_SECTION, and we'll fix it up later.
+ void* mMutex[6];
+#endif
+};
+
+class MOZ_RAII MOZ_SCOPED_CAPABILITY RecursiveMutexAutoLock {
+ public:
+ explicit RecursiveMutexAutoLock(RecursiveMutex& aRecursiveMutex)
+ MOZ_CAPABILITY_ACQUIRE(aRecursiveMutex)
+ : mRecursiveMutex(&aRecursiveMutex) {
+ NS_ASSERTION(mRecursiveMutex, "null mutex");
+ mRecursiveMutex->Lock();
+ }
+
+ ~RecursiveMutexAutoLock(void) MOZ_CAPABILITY_RELEASE() {
+ mRecursiveMutex->Unlock();
+ }
+
+ private:
+ RecursiveMutexAutoLock() = delete;
+ RecursiveMutexAutoLock(const RecursiveMutexAutoLock&) = delete;
+ RecursiveMutexAutoLock& operator=(const RecursiveMutexAutoLock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ mozilla::RecursiveMutex* mRecursiveMutex;
+};
+
+class MOZ_RAII MOZ_SCOPED_CAPABILITY RecursiveMutexAutoUnlock {
+ public:
+ explicit RecursiveMutexAutoUnlock(RecursiveMutex& aRecursiveMutex)
+ MOZ_SCOPED_UNLOCK_RELEASE(aRecursiveMutex)
+ : mRecursiveMutex(&aRecursiveMutex) {
+ NS_ASSERTION(mRecursiveMutex, "null mutex");
+ mRecursiveMutex->Unlock();
+ }
+
+ ~RecursiveMutexAutoUnlock(void) MOZ_SCOPED_UNLOCK_REACQUIRE() {
+ mRecursiveMutex->Lock();
+ }
+
+ private:
+ RecursiveMutexAutoUnlock() = delete;
+ RecursiveMutexAutoUnlock(const RecursiveMutexAutoUnlock&) = delete;
+ RecursiveMutexAutoUnlock& operator=(const RecursiveMutexAutoUnlock&) = delete;
+ static void* operator new(size_t) noexcept(true);
+
+ mozilla::RecursiveMutex* mRecursiveMutex;
+};
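+
+// An illustrative sketch (Registry and its methods are hypothetical): the
+// same thread may safely re-acquire the lock.
+//
+//   class Registry {
+//    public:
+//     Registry() : mMutex("Registry::mMutex") {}
+//     void Update() {
+//       RecursiveMutexAutoLock lock(mMutex);
+//       Validate();  // takes mMutex again on this thread; OK
+//     }
+//     void Validate() {
+//       RecursiveMutexAutoLock lock(mMutex);  // recursive acquisition
+//       // ...
+//     }
+//    private:
+//     RecursiveMutex mMutex;
+//   };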
+
+} // namespace mozilla
+
+#endif // mozilla_RecursiveMutex_h
diff --git a/xpcom/threads/ReentrantMonitor.h b/xpcom/threads/ReentrantMonitor.h
new file mode 100644
index 0000000000..09debad577
--- /dev/null
+++ b/xpcom/threads/ReentrantMonitor.h
@@ -0,0 +1,251 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ReentrantMonitor_h
+#define mozilla_ReentrantMonitor_h
+
+#include "prmon.h"
+
+#if defined(MOZILLA_INTERNAL_API) && !defined(DEBUG)
+# include "mozilla/ProfilerThreadSleep.h"
+#endif // defined( MOZILLA_INTERNAL_API) && !defined(DEBUG)
+
+#include "mozilla/BlockingResourceBase.h"
+#include "mozilla/ThreadSafety.h"
+#include "nsISupports.h"
+//
+// Provides:
+//
+// - ReentrantMonitor, a Java-like monitor
+// - ReentrantMonitorAutoEnter, an RAII class for ensuring that
+// ReentrantMonitors are properly entered and exited
+//
+// Using ReentrantMonitorAutoEnter is MUCH preferred to making bare calls to
+// ReentrantMonitor.Enter and Exit.
+//
+namespace mozilla {
+
+/**
+ * ReentrantMonitor
+ * Java-like monitor.
+ * When possible, use ReentrantMonitorAutoEnter to hold this monitor within a
+ * scope, instead of calling Enter/Exit directly.
+ **/
+class MOZ_CAPABILITY("reentrant monitor") ReentrantMonitor
+ : BlockingResourceBase {
+ public:
+ /**
+ * ReentrantMonitor
+ * @param aName A name which can reference this monitor
+ */
+ explicit ReentrantMonitor(const char* aName)
+ : BlockingResourceBase(aName, eReentrantMonitor)
+#ifdef DEBUG
+ ,
+ mEntryCount(0)
+#endif
+ {
+ MOZ_COUNT_CTOR(ReentrantMonitor);
+ mReentrantMonitor = PR_NewMonitor();
+ if (!mReentrantMonitor) {
+ MOZ_CRASH("Can't allocate mozilla::ReentrantMonitor");
+ }
+ }
+
+ /**
+ * ~ReentrantMonitor
+ **/
+ ~ReentrantMonitor() {
+ NS_ASSERTION(mReentrantMonitor,
+ "improperly constructed ReentrantMonitor or double free");
+ PR_DestroyMonitor(mReentrantMonitor);
+ mReentrantMonitor = 0;
+ MOZ_COUNT_DTOR(ReentrantMonitor);
+ }
+
+#ifndef DEBUG
+ /**
+ * Enter
+ * @see prmon.h
+ **/
+ void Enter() MOZ_CAPABILITY_ACQUIRE() { PR_EnterMonitor(mReentrantMonitor); }
+
+ /**
+ * Exit
+ * @see prmon.h
+ **/
+ void Exit() MOZ_CAPABILITY_RELEASE() { PR_ExitMonitor(mReentrantMonitor); }
+
+ /**
+ * Wait
+ * @see prmon.h
+ **/
+ nsresult Wait(PRIntervalTime aInterval = PR_INTERVAL_NO_TIMEOUT) {
+ PR_ASSERT_CURRENT_THREAD_IN_MONITOR(mReentrantMonitor);
+# ifdef MOZILLA_INTERNAL_API
+ AUTO_PROFILER_THREAD_SLEEP;
+# endif // MOZILLA_INTERNAL_API
+ return PR_Wait(mReentrantMonitor, aInterval) == PR_SUCCESS
+ ? NS_OK
+ : NS_ERROR_FAILURE;
+ }
+
+#else // ifndef DEBUG
+ void Enter() MOZ_CAPABILITY_ACQUIRE();
+ void Exit() MOZ_CAPABILITY_RELEASE();
+ nsresult Wait(PRIntervalTime aInterval = PR_INTERVAL_NO_TIMEOUT);
+
+#endif // ifndef DEBUG
+
+ /**
+ * Notify
+ * @see prmon.h
+ **/
+ nsresult Notify() {
+ return PR_Notify(mReentrantMonitor) == PR_SUCCESS ? NS_OK
+ : NS_ERROR_FAILURE;
+ }
+
+ /**
+ * NotifyAll
+ * @see prmon.h
+ **/
+ nsresult NotifyAll() {
+ return PR_NotifyAll(mReentrantMonitor) == PR_SUCCESS ? NS_OK
+ : NS_ERROR_FAILURE;
+ }
+
+#ifdef DEBUG
+ /**
+ * AssertCurrentThreadIn
+ * @see prmon.h
+ **/
+ void AssertCurrentThreadIn() MOZ_ASSERT_CAPABILITY(this) {
+ PR_ASSERT_CURRENT_THREAD_IN_MONITOR(mReentrantMonitor);
+ }
+
+ /**
+ * AssertNotCurrentThreadIn
+ * @see prmon.h
+ **/
+ void AssertNotCurrentThreadIn() MOZ_ASSERT_CAPABILITY(!this) {
+ // FIXME bug 476536
+ }
+
+#else
+ void AssertCurrentThreadIn() MOZ_ASSERT_CAPABILITY(this) {}
+ void AssertNotCurrentThreadIn() MOZ_ASSERT_CAPABILITY(!this) {}
+
+#endif // ifdef DEBUG
+
+ private:
+ ReentrantMonitor();
+ ReentrantMonitor(const ReentrantMonitor&);
+ ReentrantMonitor& operator=(const ReentrantMonitor&);
+
+ PRMonitor* mReentrantMonitor;
+#ifdef DEBUG
+ int32_t mEntryCount;
+#endif
+};
+
+/**
+ * ReentrantMonitorAutoEnter
+ * Enters the ReentrantMonitor when it enters scope, and exits it when
+ * it leaves scope.
+ *
+ * MUCH PREFERRED to bare calls to ReentrantMonitor.Enter and Exit.
+ */
+class MOZ_SCOPED_CAPABILITY MOZ_STACK_CLASS ReentrantMonitorAutoEnter {
+ public:
+  /**
+   * Constructor
+   * The constructor acquires the given lock. The destructor
+   * releases the lock.
+   *
+   * @param aReentrantMonitor A reference to a valid
+   *                          mozilla::ReentrantMonitor.
+   **/
+ explicit ReentrantMonitorAutoEnter(
+ mozilla::ReentrantMonitor& aReentrantMonitor)
+ MOZ_CAPABILITY_ACQUIRE(aReentrantMonitor)
+ : mReentrantMonitor(&aReentrantMonitor) {
+ NS_ASSERTION(mReentrantMonitor, "null monitor");
+ mReentrantMonitor->Enter();
+ }
+
+ ~ReentrantMonitorAutoEnter(void) MOZ_CAPABILITY_RELEASE() {
+ mReentrantMonitor->Exit();
+ }
+
+ nsresult Wait(PRIntervalTime aInterval = PR_INTERVAL_NO_TIMEOUT) {
+ return mReentrantMonitor->Wait(aInterval);
+ }
+
+ nsresult Notify() { return mReentrantMonitor->Notify(); }
+ nsresult NotifyAll() { return mReentrantMonitor->NotifyAll(); }
+
+ private:
+ ReentrantMonitorAutoEnter();
+ ReentrantMonitorAutoEnter(const ReentrantMonitorAutoEnter&);
+ ReentrantMonitorAutoEnter& operator=(const ReentrantMonitorAutoEnter&);
+ static void* operator new(size_t) noexcept(true);
+
+ friend class ReentrantMonitorAutoExit;
+
+ mozilla::ReentrantMonitor* mReentrantMonitor;
+};
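+
+// An illustrative sketch of a blocking handoff (the members and methods are
+// hypothetical):
+//
+//   ReentrantMonitor mMonitor{"Handoff::mMonitor"};
+//   bool mReady = false;
+//
+//   void Consumer() {
+//     ReentrantMonitorAutoEnter mon(mMonitor);
+//     while (!mReady) {
+//       mon.Wait();  // releases the monitor while blocked
+//     }
+//   }
+//   void Producer() {
+//     ReentrantMonitorAutoEnter mon(mMonitor);
+//     mReady = true;
+//     mon.Notify();  // wakes one waiter
+//   }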
+
+/**
+ * ReentrantMonitorAutoExit
+ * Exits the ReentrantMonitor when it enters scope, and re-enters it when it
+ * leaves scope.
+ *
+ * MUCH PREFERRED to bare calls to ReentrantMonitor.Exit and Enter.
+ */
+class MOZ_SCOPED_CAPABILITY MOZ_STACK_CLASS ReentrantMonitorAutoExit {
+ public:
+ /**
+ * Constructor
+ * The constructor releases the given lock. The destructor
+ * acquires the lock. The lock must be held before constructing
+ * this object!
+ *
+   * @param aReentrantMonitor A reference to a valid
+   *                          mozilla::ReentrantMonitor. It must already be
+   *                          locked.
+ **/
+ explicit ReentrantMonitorAutoExit(ReentrantMonitor& aReentrantMonitor)
+ MOZ_EXCLUSIVE_RELEASE(aReentrantMonitor)
+ : mReentrantMonitor(&aReentrantMonitor) {
+ NS_ASSERTION(mReentrantMonitor, "null monitor");
+ mReentrantMonitor->AssertCurrentThreadIn();
+ mReentrantMonitor->Exit();
+ }
+
+ explicit ReentrantMonitorAutoExit(
+ ReentrantMonitorAutoEnter& aReentrantMonitorAutoEnter)
+ MOZ_EXCLUSIVE_RELEASE(aReentrantMonitorAutoEnter.mReentrantMonitor)
+ : mReentrantMonitor(aReentrantMonitorAutoEnter.mReentrantMonitor) {
+ NS_ASSERTION(mReentrantMonitor, "null monitor");
+ mReentrantMonitor->AssertCurrentThreadIn();
+ mReentrantMonitor->Exit();
+ }
+
+ ~ReentrantMonitorAutoExit(void) MOZ_EXCLUSIVE_RELEASE() {
+ mReentrantMonitor->Enter();
+ }
+
+ private:
+ ReentrantMonitorAutoExit();
+ ReentrantMonitorAutoExit(const ReentrantMonitorAutoExit&);
+ ReentrantMonitorAutoExit& operator=(const ReentrantMonitorAutoExit&);
+ static void* operator new(size_t) noexcept(true);
+
+ ReentrantMonitor* mReentrantMonitor;
+};
+
+} // namespace mozilla
+
+#endif // ifndef mozilla_ReentrantMonitor_h
diff --git a/xpcom/threads/SchedulerGroup.cpp b/xpcom/threads/SchedulerGroup.cpp
new file mode 100644
index 0000000000..1bb8c615fc
--- /dev/null
+++ b/xpcom/threads/SchedulerGroup.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/SchedulerGroup.h"
+
+#include <utility>
+
+#include "jsfriendapi.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Unused.h"
+#include "mozilla/dom/DocGroup.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "nsINamed.h"
+#include "nsQueryObject.h"
+#include "nsThreadUtils.h"
+
+using namespace mozilla;
+
+/* static */
+nsresult SchedulerGroup::UnlabeledDispatch(
+ TaskCategory aCategory, already_AddRefed<nsIRunnable>&& aRunnable) {
+ if (NS_IsMainThread()) {
+ return NS_DispatchToCurrentThread(std::move(aRunnable));
+ } else {
+ return NS_DispatchToMainThread(std::move(aRunnable));
+ }
+}
+
+/* static */
+nsresult SchedulerGroup::Dispatch(TaskCategory aCategory,
+ already_AddRefed<nsIRunnable>&& aRunnable) {
+ return LabeledDispatch(aCategory, std::move(aRunnable), nullptr);
+}
+
+/* static */
+nsresult SchedulerGroup::LabeledDispatch(
+ TaskCategory aCategory, already_AddRefed<nsIRunnable>&& aRunnable,
+ mozilla::PerformanceCounter* aPerformanceCounter) {
+ nsCOMPtr<nsIRunnable> runnable(aRunnable);
+ if (XRE_IsContentProcess()) {
+ RefPtr<Runnable> internalRunnable =
+ new Runnable(runnable.forget(), aPerformanceCounter);
+ return InternalUnlabeledDispatch(aCategory, internalRunnable.forget());
+ }
+ return UnlabeledDispatch(aCategory, runnable.forget());
+}
+
+/*static*/
+nsresult SchedulerGroup::InternalUnlabeledDispatch(
+ TaskCategory aCategory, already_AddRefed<Runnable>&& aRunnable) {
+ if (NS_IsMainThread()) {
+ // NS_DispatchToCurrentThread will not leak the passed in runnable
+ // when it fails, so we don't need to do anything special.
+ return NS_DispatchToCurrentThread(std::move(aRunnable));
+ }
+
+ RefPtr<Runnable> runnable(aRunnable);
+ nsresult rv = NS_DispatchToMainThread(do_AddRef(runnable));
+ if (NS_FAILED(rv)) {
+ // Dispatch failed. This is a situation where we would have used
+ // NS_DispatchToMainThread rather than calling into the SchedulerGroup
+ // machinery, and the caller would be expecting to leak the nsIRunnable
+ // originally passed in. But because we've had to wrap things up
+ // internally, we were going to leak the nsIRunnable *and* our Runnable
+ // wrapper. But there's no reason that we have to leak our Runnable
+ // wrapper; we can just leak the wrapped nsIRunnable, and let the caller
+ // take care of unleaking it if they need to.
+ Unused << runnable->mRunnable.forget().take();
+ nsrefcnt refcnt = runnable.get()->Release();
+ MOZ_RELEASE_ASSERT(refcnt == 1, "still holding an unexpected reference!");
+ }
+
+ return rv;
+}
+
+SchedulerGroup::Runnable::Runnable(
+ already_AddRefed<nsIRunnable>&& aRunnable,
+ mozilla::PerformanceCounter* aPerformanceCounter)
+ : mozilla::Runnable("SchedulerGroup::Runnable"),
+ mRunnable(std::move(aRunnable)),
+ mPerformanceCounter(aPerformanceCounter) {}
+
+mozilla::PerformanceCounter* SchedulerGroup::Runnable::GetPerformanceCounter()
+ const {
+ return mPerformanceCounter;
+}
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+NS_IMETHODIMP
+SchedulerGroup::Runnable::GetName(nsACString& aName) {
+ // Try to get a name from the underlying runnable.
+ nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable);
+ if (named) {
+ named->GetName(aName);
+ }
+ if (aName.IsEmpty()) {
+ aName.AssignLiteral("anonymous");
+ }
+
+ return NS_OK;
+}
+#endif
+
+NS_IMETHODIMP
+SchedulerGroup::Runnable::Run() {
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
+ // The runnable's destructor can have side effects, so try to execute it in
+ // the scope of the SchedulerGroup.
+ nsCOMPtr<nsIRunnable> runnable(std::move(mRunnable));
+ return runnable->Run();
+}
+
+NS_IMETHODIMP
+SchedulerGroup::Runnable::GetPriority(uint32_t* aPriority) {
+ *aPriority = nsIRunnablePriority::PRIORITY_NORMAL;
+ nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(mRunnable);
+ return runnablePrio ? runnablePrio->GetPriority(aPriority) : NS_OK;
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(SchedulerGroup::Runnable, mozilla::Runnable,
+ nsIRunnablePriority, SchedulerGroup::Runnable)
diff --git a/xpcom/threads/SchedulerGroup.h b/xpcom/threads/SchedulerGroup.h
new file mode 100644
index 0000000000..76dbfb12f8
--- /dev/null
+++ b/xpcom/threads/SchedulerGroup.h
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_SchedulerGroup_h
+#define mozilla_SchedulerGroup_h
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/TaskCategory.h"
+#include "mozilla/PerformanceCounter.h"
+#include "nsCOMPtr.h"
+#include "nsID.h"
+#include "nsIRunnable.h"
+#include "nsISupports.h"
+#include "nsStringFwd.h"
+#include "nsThreadUtils.h"
+#include "nscore.h"
+
+class nsIEventTarget;
+class nsIRunnable;
+class nsISerialEventTarget;
+
+namespace mozilla {
+class AbstractThread;
+namespace dom {
+class DocGroup;
+} // namespace dom
+
+#define NS_SCHEDULERGROUPRUNNABLE_IID \
+ { \
+ 0xd31b7420, 0x872b, 0x4cfb, { \
+ 0xa9, 0xc6, 0xae, 0x4c, 0x0f, 0x06, 0x36, 0x74 \
+ } \
+ }
+
+class SchedulerGroup {
+ public:
+ class Runnable final : public mozilla::Runnable, public nsIRunnablePriority {
+ public:
+ Runnable(already_AddRefed<nsIRunnable>&& aRunnable,
+ mozilla::PerformanceCounter* aPerformanceCounter);
+
+ mozilla::PerformanceCounter* GetPerformanceCounter() const;
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHOD GetName(nsACString& aName) override;
+#endif
+
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIRUNNABLE
+ NS_DECL_NSIRUNNABLEPRIORITY
+
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_SCHEDULERGROUPRUNNABLE_IID);
+
+ private:
+ friend class SchedulerGroup;
+
+ ~Runnable() = default;
+
+ nsCOMPtr<nsIRunnable> mRunnable;
+ RefPtr<mozilla::PerformanceCounter> mPerformanceCounter;
+ };
+ friend class Runnable;
+
+ static nsresult Dispatch(TaskCategory aCategory,
+ already_AddRefed<nsIRunnable>&& aRunnable);
+
+ static nsresult UnlabeledDispatch(TaskCategory aCategory,
+ already_AddRefed<nsIRunnable>&& aRunnable);
+
+ static nsresult LabeledDispatch(
+ TaskCategory aCategory, already_AddRefed<nsIRunnable>&& aRunnable,
+ mozilla::PerformanceCounter* aPerformanceCounter);
+
+ protected:
+ static nsresult InternalUnlabeledDispatch(
+ TaskCategory aCategory, already_AddRefed<Runnable>&& aRunnable);
+};
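+
+// An illustrative sketch of dispatching through SchedulerGroup (the task
+// name and lambda body are hypothetical):
+//
+//   nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
+//       "ExampleTask", [] { /* runs on the main thread */ });
+//   SchedulerGroup::Dispatch(TaskCategory::Other, task.forget());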
+
+NS_DEFINE_STATIC_IID_ACCESSOR(SchedulerGroup::Runnable,
+ NS_SCHEDULERGROUPRUNNABLE_IID);
+
+} // namespace mozilla
+
+#endif // mozilla_SchedulerGroup_h
diff --git a/xpcom/threads/SharedThreadPool.cpp b/xpcom/threads/SharedThreadPool.cpp
new file mode 100644
index 0000000000..a2de1c4495
--- /dev/null
+++ b/xpcom/threads/SharedThreadPool.cpp
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/Services.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "mozilla/StaticPtr.h"
+#include "nsTHashMap.h"
+#include "nsXPCOMCIDInternal.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIObserver.h"
+#include "nsIObserverService.h"
+#include "nsIThreadManager.h"
+#include "nsThreadPool.h"
+
+namespace mozilla {
+
+// Created and destroyed on the main thread.
+static StaticAutoPtr<ReentrantMonitor> sMonitor;
+
+// Hashtable, maps thread pool name to SharedThreadPool instance.
+// Modified only on the main thread.
+static StaticAutoPtr<nsTHashMap<nsCStringHashKey, SharedThreadPool*>> sPools;
+
+static already_AddRefed<nsIThreadPool> CreateThreadPool(const nsCString& aName);
+
+class SharedThreadPoolShutdownObserver : public nsIObserver {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSIOBSERVER
+ protected:
+ virtual ~SharedThreadPoolShutdownObserver() = default;
+};
+
+NS_IMPL_ISUPPORTS(SharedThreadPoolShutdownObserver, nsIObserver, nsISupports)
+
+NS_IMETHODIMP
+SharedThreadPoolShutdownObserver::Observe(nsISupports* aSubject,
+ const char* aTopic,
+ const char16_t* aData) {
+ MOZ_RELEASE_ASSERT(!strcmp(aTopic, "xpcom-shutdown-threads"));
+#ifdef EARLY_BETA_OR_EARLIER
+ {
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ if (!sPools->IsEmpty()) {
+ nsAutoCString str;
+ for (const auto& key : sPools->Keys()) {
+ str.AppendPrintf("\"%s\" ", nsAutoCString(key).get());
+ }
+ printf_stderr(
+ "SharedThreadPool in xpcom-shutdown-threads. Waiting for "
+ "pools %s\n",
+ str.get());
+ }
+ }
+#endif
+ SharedThreadPool::SpinUntilEmpty();
+ sMonitor = nullptr;
+ sPools = nullptr;
+ return NS_OK;
+}
+
+void SharedThreadPool::InitStatics() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!sMonitor && !sPools);
+ sMonitor = new ReentrantMonitor("SharedThreadPool");
+ sPools = new nsTHashMap<nsCStringHashKey, SharedThreadPool*>();
+ nsCOMPtr<nsIObserverService> obsService =
+ mozilla::services::GetObserverService();
+ nsCOMPtr<nsIObserver> obs = new SharedThreadPoolShutdownObserver();
+ obsService->AddObserver(obs, "xpcom-shutdown-threads", false);
+}
+
+/* static */
+bool SharedThreadPool::IsEmpty() {
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ return !sPools->Count();
+}
+
+/* static */
+void SharedThreadPool::SpinUntilEmpty() {
+ MOZ_ASSERT(NS_IsMainThread());
+ SpinEventLoopUntil("SharedThreadPool::SpinUntilEmpty"_ns, []() -> bool {
+ sMonitor->AssertNotCurrentThreadIn();
+ return IsEmpty();
+ });
+}
+
+already_AddRefed<SharedThreadPool> SharedThreadPool::Get(
+ const nsCString& aName, uint32_t aThreadLimit) {
+ MOZ_ASSERT(sMonitor && sPools);
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ RefPtr<SharedThreadPool> pool;
+
+ return sPools->WithEntryHandle(
+ aName, [&](auto&& entry) -> already_AddRefed<SharedThreadPool> {
+ if (entry) {
+ pool = entry.Data();
+ if (NS_FAILED(pool->EnsureThreadLimitIsAtLeast(aThreadLimit))) {
+ NS_WARNING("Failed to set limits on thread pool");
+ }
+ } else {
+ nsCOMPtr<nsIThreadPool> threadPool(CreateThreadPool(aName));
+ if (NS_WARN_IF(!threadPool)) {
+ sPools->Remove(aName); // XXX entry.Remove()
+ return nullptr;
+ }
+ pool = new SharedThreadPool(aName, threadPool);
+
+          // Set the thread and idle limits. Note that we don't rely on
+          // EnsureThreadLimitIsAtLeast() for this, as the default thread
+          // limit is 4; if aThreadLimit were less than 4, we'd end up with
+          // a pool of 4 threads rather than the number requested.
+ nsresult rv = pool->SetThreadLimit(aThreadLimit);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ sPools->Remove(aName); // XXX entry.Remove()
+ return nullptr;
+ }
+
+ rv = pool->SetIdleThreadLimit(aThreadLimit);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ sPools->Remove(aName); // XXX entry.Remove()
+ return nullptr;
+ }
+
+ entry.Insert(pool.get());
+ }
+
+ return pool.forget();
+ });
+}
+
+NS_IMETHODIMP_(MozExternalRefCountType) SharedThreadPool::AddRef(void) {
+ MOZ_ASSERT(sMonitor);
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ MOZ_ASSERT(int32_t(mRefCnt) >= 0, "illegal refcnt");
+ nsrefcnt count = ++mRefCnt;
+ NS_LOG_ADDREF(this, count, "SharedThreadPool", sizeof(*this));
+ return count;
+}
+
+NS_IMETHODIMP_(MozExternalRefCountType) SharedThreadPool::Release(void) {
+ MOZ_ASSERT(sMonitor);
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "SharedThreadPool");
+ if (count) {
+ return count;
+ }
+
+ // Remove SharedThreadPool from table of pools.
+ sPools->Remove(mName);
+ MOZ_ASSERT(!sPools->Get(mName));
+
+  // Dispatch an event to the main thread to call Shutdown() on
+  // the nsIThreadPool. The Runnable here holds a reference to the pool,
+  // and when the Runnable releases the nsIThreadPool it will be deleted.
+ NS_DispatchToMainThread(NewRunnableMethod("nsIThreadPool::Shutdown", mPool,
+ &nsIThreadPool::Shutdown));
+
+ // Stabilize refcount, so that if something in the dtor QIs, it won't explode.
+ mRefCnt = 1;
+ delete this;
+ return 0;
+}
+
+NS_IMPL_QUERY_INTERFACE(SharedThreadPool, nsIThreadPool, nsIEventTarget)
+
+SharedThreadPool::SharedThreadPool(const nsCString& aName, nsIThreadPool* aPool)
+ : mName(aName), mPool(aPool), mRefCnt(0) {}
+
+SharedThreadPool::~SharedThreadPool() = default;
+
+nsresult SharedThreadPool::EnsureThreadLimitIsAtLeast(uint32_t aLimit) {
+  // We limit the number of threads that we use. Note that we set the thread
+  // limit to the same as the idle limit so that we're not constantly creating
+  // and destroying threads (see Bug 881954). When the thread pool threads
+  // shut down, they dispatch an event to the main thread to call
+  // nsIThread::Shutdown(), and if we're very busy that can take a while to
+  // run, and we end up with dozens of extra threads. Note that threads that
+  // are idle for 60 seconds are shut down naturally.
+ uint32_t existingLimit = 0;
+ nsresult rv;
+
+ rv = mPool->GetThreadLimit(&existingLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (aLimit > existingLimit) {
+ rv = mPool->SetThreadLimit(aLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ rv = mPool->GetIdleThreadLimit(&existingLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (aLimit > existingLimit) {
+ rv = mPool->SetIdleThreadLimit(aLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+static already_AddRefed<nsIThreadPool> CreateThreadPool(
+ const nsCString& aName) {
+ nsCOMPtr<nsIThreadPool> pool = new nsThreadPool();
+
+ nsresult rv = pool->SetName(aName);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ rv = pool->SetThreadStackSize(nsIThreadManager::kThreadPoolStackSize);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ return pool.forget();
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/SharedThreadPool.h b/xpcom/threads/SharedThreadPool.h
new file mode 100644
index 0000000000..5c29ba11f2
--- /dev/null
+++ b/xpcom/threads/SharedThreadPool.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef SharedThreadPool_h_
+#define SharedThreadPool_h_
+
+#include <utility>
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/RefCountType.h"
+#include "nsCOMPtr.h"
+#include "nsID.h"
+#include "nsIThreadPool.h"
+#include "nsString.h"
+#include "nscore.h"
+
+class nsIRunnable;
+
+namespace mozilla {
+
+// Wrapper that makes an nsIThreadPool a singleton, and provides a
+// consistent threadsafe interface to get instances. Callers simply get a
+// SharedThreadPool by the name of its nsIThreadPool. All get requests of
+// the same name get the same SharedThreadPool. Users must store a reference
+// to the pool, and when the last reference to a SharedThreadPool is dropped
+// the pool is shut down and deleted. Users aren't required to manually
+// shut down the pool, and can release references on any thread. This can make
+// it significantly easier to use thread pools, because the caller doesn't need
+// to worry about joining and tearing them down.
+//
+// On Windows all threads in the pool have MSCOM initialized with
+// COINIT_MULTITHREADED. Note that not all users of MSCOM use this mode (see
+// [1]), and mixing MSCOM objects between the two modes is terrible for
+// performance and can cause some functions to fail. So be careful when using
+// Win32 APIs on a SharedThreadPool, and avoid sharing objects if at all
+// possible.
+//
+// [1]
+// https://searchfox.org/mozilla-central/search?q=coinitialize&redirect=false
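+//
+// A minimal usage sketch (illustrative only; the pool name, limit, and work
+// are caller-chosen):
+//
+//   RefPtr<SharedThreadPool> pool = SharedThreadPool::Get("MyPool"_ns, 4);
+//   pool->Dispatch(NS_NewRunnableFunction("MyPool::Work", [] {
+//     // ... work that runs on one of the pool's threads ...
+//   }));
+//   // Dropping the last reference shuts the pool down automatically.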
+class SharedThreadPool : public nsIThreadPool {
+ public:
+ // Gets (possibly creating) the shared thread pool singleton instance with
+ // thread pool named aName.
+ static already_AddRefed<SharedThreadPool> Get(const nsCString& aName,
+ uint32_t aThreadLimit = 4);
+
+  // We implement a custom threadsafe AddRef/Release pair that destroys the
+  // shared pool singleton when the refcount drops to 0. AddRef/Release are
+  // implemented using locking, so it's not recommended that you use them
+  // in a tight loop.
+ NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
+ NS_IMETHOD_(MozExternalRefCountType) AddRef(void) override;
+ NS_IMETHOD_(MozExternalRefCountType) Release(void) override;
+
+ // Forward behaviour to wrapped thread pool implementation.
+ NS_FORWARD_SAFE_NSITHREADPOOL(mPool);
+
+  // Call this when dispatching from an event on the same thread pool that is
+  // about to complete. We should not create a new thread in that case, since
+  // a thread is about to become idle.
+ nsresult DispatchFromEndOfTaskInThisPool(nsIRunnable* event) {
+ return Dispatch(event, NS_DISPATCH_AT_END);
+ }
+
+ NS_IMETHOD DispatchFromScript(nsIRunnable* event, uint32_t flags) override {
+ return Dispatch(event, flags);
+ }
+
+ NS_IMETHOD Dispatch(already_AddRefed<nsIRunnable> event,
+ uint32_t flags = NS_DISPATCH_NORMAL) override {
+ return !mPool ? NS_ERROR_NULL_POINTER
+ : mPool->Dispatch(std::move(event), flags);
+ }
+
+ NS_IMETHOD DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ using nsIEventTarget::Dispatch;
+
+ NS_IMETHOD RegisterShutdownTask(nsITargetShutdownTask* task) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ NS_IMETHOD UnregisterShutdownTask(nsITargetShutdownTask* task) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ NS_IMETHOD IsOnCurrentThread(bool* _retval) override {
+ return !mPool ? NS_ERROR_NULL_POINTER : mPool->IsOnCurrentThread(_retval);
+ }
+
+ NS_IMETHOD_(bool) IsOnCurrentThreadInfallible() override {
+ return mPool && mPool->IsOnCurrentThread();
+ }
+
+ // Creates necessary statics. Called once at startup.
+ static void InitStatics();
+
+ // Spins the event loop until all thread pools are shutdown.
+ // *Must* be called on the main thread.
+ static void SpinUntilEmpty();
+
+ private:
+ // Returns whether there are no pools in existence at the moment.
+ static bool IsEmpty();
+
+ // Creates a singleton SharedThreadPool wrapper around aPool.
+ // aName is the name of the aPool, and is used to lookup the
+ // SharedThreadPool in the hash table of all created pools.
+ SharedThreadPool(const nsCString& aName, nsIThreadPool* aPool);
+ virtual ~SharedThreadPool();
+
+ nsresult EnsureThreadLimitIsAtLeast(uint32_t aThreadLimit);
+
+ // Name of mPool.
+ const nsCString mName;
+
+ // Thread pool being wrapped.
+ nsCOMPtr<nsIThreadPool> mPool;
+
+ // Refcount. We implement custom ref counting so that the thread pool is
+ // shutdown in a threadsafe manner and singletonness is preserved.
+ nsrefcnt mRefCnt;
+};
+
+} // namespace mozilla
+
+#endif // SharedThreadPool_h_
diff --git a/xpcom/threads/SpinEventLoopUntil.h b/xpcom/threads/SpinEventLoopUntil.h
new file mode 100644
index 0000000000..a281177268
--- /dev/null
+++ b/xpcom/threads/SpinEventLoopUntil.h
@@ -0,0 +1,191 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef xpcom_threads_SpinEventLoopUntil_h__
+#define xpcom_threads_SpinEventLoopUntil_h__
+
+#include "MainThreadUtils.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ProfilerLabels.h"
+#include "mozilla/ProfilerMarkers.h"
+#include "mozilla/StaticMutex.h"
+#include "nsString.h"
+#include "nsThreadUtils.h"
+#include "xpcpublic.h"
+
+class nsIThread;
+
+// A wrapper for nested event loops.
+//
+// This function is intended to make code more obvious (do you remember
+// what NS_ProcessNextEvent(nullptr, true) means?) and slightly more
+// efficient, as people often pass nullptr or NS_GetCurrentThread to
+// NS_ProcessNextEvent, which results in needless querying of the current
+// thread every time through the loop.
+//
+// You should use this function in preference to NS_ProcessNextEvent inside
+// a loop unless one of the following is true:
+//
+// * You need to pass `false` to NS_ProcessNextEvent; or
+// * You need to do unusual things around the call to NS_ProcessNextEvent,
+// such as unlocking mutexes that you are holding.
+//
+// If you *do* need to call NS_ProcessNextEvent manually, please do call
+// NS_GetCurrentThread() outside of your loop and pass the returned pointer
+// into NS_ProcessNextEvent for a tiny efficiency win.
+namespace mozilla {
+
+// You should normally not need to deal with this template parameter. If
+// you enjoy esoteric event loop details, read on.
+//
+// If you specify that NS_ProcessNextEvent wait for an event, it is possible
+// for NS_ProcessNextEvent to return false, i.e. to indicate that an event
+// was not processed. This can only happen when the thread has been shut
+// down by another thread, but is still attempting to process events outside
+// of a nested event loop.
+//
+// This behavior is admittedly strange. The scenario it deals with is the
+// following:
+//
+// * The current thread has been shut down by some owner thread.
+// * The current thread is spinning an event loop waiting for some condition
+// to become true.
+// * Said condition is actually being fulfilled by another thread, so there
+// are timing issues in play.
+//
+// Thus, there is a small window where the current thread's event loop
+// spinning can check the condition, find it false, and call
+// NS_ProcessNextEvent to wait for another event. But we don't actually
+// want it to wait indefinitely, because there might not be any other events
+// in the event loop, and the current thread can't accept dispatched events
+// because it's being shut down. Thus, actually blocking would hang the
+// thread, which is bad. The solution, then, is to detect such a scenario
+// and not actually block inside NS_ProcessNextEvent.
+//
+// But this is a problem, because we want to return the status of
+// NS_ProcessNextEvent to the caller of SpinEventLoopUntil if possible. In
+// the above scenario, however, we'd stop spinning prematurely and cause
+// all sorts of havoc. We therefore have this template parameter to
+// control whether errors are ignored or passed out to the caller of
+// SpinEventLoopUntil. The latter is the default; if you find yourself
+// wanting to use the former, you should think long and hard before doing
+// so, and write a comment like this defending your choice.
+
+enum class ProcessFailureBehavior {
+ IgnoreAndContinue,
+ ReportToCaller,
+};
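+
+// For example, a caller that must keep spinning even if event processing
+// fails during thread shutdown would write (illustrative only; `drained` is
+// a hypothetical condition owned by the caller):
+//
+//   SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+//       "MyModule:DrainDuringShutdown"_ns, [&] { return drained; });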
+
+// SpinEventLoopUntil is a dangerous operation that can result in hangs.
+// In particular during shutdown we want to know if we are hanging
+// inside a nested event loop on the main thread.
+// This is a helper annotation class to keep track of this.
+struct MOZ_STACK_CLASS AutoNestedEventLoopAnnotation {
+ explicit AutoNestedEventLoopAnnotation(const nsACString& aEntry)
+ : mPrev(nullptr) {
+ if (NS_IsMainThread()) {
+ StaticMutexAutoLock lock(sStackMutex);
+ mPrev = sCurrent;
+ sCurrent = this;
+ if (mPrev) {
+ mStack = mPrev->mStack + "|"_ns + aEntry;
+ } else {
+ mStack = aEntry;
+ }
+ AnnotateXPCOMSpinEventLoopStack(mStack);
+ }
+ }
+
+ ~AutoNestedEventLoopAnnotation() {
+ if (NS_IsMainThread()) {
+ StaticMutexAutoLock lock(sStackMutex);
+ MOZ_ASSERT(sCurrent == this);
+ sCurrent = mPrev;
+ if (mPrev) {
+ AnnotateXPCOMSpinEventLoopStack(mPrev->mStack);
+ } else {
+ AnnotateXPCOMSpinEventLoopStack(""_ns);
+ }
+ }
+ }
+
+ static void CopyCurrentStack(nsCString& aNestedSpinStack) {
+ // We need to copy this behind a mutex as the
+ // memory for our instances is stack-bound and
+ // can go away at any time.
+ StaticMutexAutoLock lock(sStackMutex);
+ if (sCurrent) {
+ aNestedSpinStack = sCurrent->mStack;
+ } else {
+ aNestedSpinStack = "(no nested event loop active)"_ns;
+ }
+ }
+
+ private:
+ AutoNestedEventLoopAnnotation(const AutoNestedEventLoopAnnotation&) = delete;
+ AutoNestedEventLoopAnnotation& operator=(
+ const AutoNestedEventLoopAnnotation&) = delete;
+
+ // The declarations of these statics live in nsThreadManager.cpp.
+ static AutoNestedEventLoopAnnotation* sCurrent MOZ_GUARDED_BY(sStackMutex);
+ static StaticMutex sStackMutex;
+
+ // We need this to avoid the inclusion of nsExceptionHandler.h here
+ // which can include windows.h which disturbs some dom/media/gtest.
+ // The implementation lives in nsThreadManager.cpp.
+ static void AnnotateXPCOMSpinEventLoopStack(const nsACString& aStack);
+
+ AutoNestedEventLoopAnnotation* mPrev MOZ_GUARDED_BY(sStackMutex);
+ nsCString mStack MOZ_GUARDED_BY(sStackMutex);
+};
+
+// Please see the above notes for the Behavior template parameter.
+//
+// aVeryGoodReasonToDoThis is usually a literal string unique to each
+// caller that can be recognized in the XPCOMSpinEventLoopStack
+// annotation.
+// aPredicate is the condition we wait for.
+// aThread can be used to specify a thread, see the above introduction.
+// It defaults to the current thread.
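+//
+// A typical call looks like this (illustrative only; StartAsyncWork is a
+// hypothetical asynchronous operation):
+//
+//   bool done = false;
+//   StartAsyncWork([&] { done = true; });
+//   SpinEventLoopUntil("MyModule:WaitForAsyncWork"_ns,
+//                      [&]() -> bool { return done; });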
+template <
+ ProcessFailureBehavior Behavior = ProcessFailureBehavior::ReportToCaller,
+ typename Pred>
+bool SpinEventLoopUntil(const nsACString& aVeryGoodReasonToDoThis,
+ Pred&& aPredicate, nsIThread* aThread = nullptr) {
+ // Prepare the annotations
+ AutoNestedEventLoopAnnotation annotation(aVeryGoodReasonToDoThis);
+ AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING_NONSENSITIVE(
+ "SpinEventLoopUntil", OTHER, aVeryGoodReasonToDoThis);
+ AUTO_PROFILER_MARKER_TEXT("SpinEventLoop", OTHER, MarkerStack::Capture(),
+ aVeryGoodReasonToDoThis);
+
+ nsIThread* thread = aThread ? aThread : NS_GetCurrentThread();
+
+ // From a latency perspective, spinning the event loop is like leaving script
+ // and returning to the event loop. Tell the watchdog we stopped running
+ // script (until we return).
+ mozilla::Maybe<xpc::AutoScriptActivity> asa;
+ if (NS_IsMainThread()) {
+ asa.emplace(false);
+ }
+
+ while (!aPredicate()) {
+ bool didSomething = NS_ProcessNextEvent(thread, true);
+
+ if (Behavior == ProcessFailureBehavior::IgnoreAndContinue) {
+ // Don't care what happened, continue on.
+ continue;
+ } else if (!didSomething) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace mozilla
+
+#endif // xpcom_threads_SpinEventLoopUntil_h__
diff --git a/xpcom/threads/StateMirroring.h b/xpcom/threads/StateMirroring.h
new file mode 100644
index 0000000000..c233116962
--- /dev/null
+++ b/xpcom/threads/StateMirroring.h
@@ -0,0 +1,393 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(StateMirroring_h_)
+# define StateMirroring_h_
+
+# include <cstddef>
+# include "mozilla/AbstractThread.h"
+# include "mozilla/AlreadyAddRefed.h"
+# include "mozilla/Assertions.h"
+# include "mozilla/Logging.h"
+# include "mozilla/Maybe.h"
+# include "mozilla/RefPtr.h"
+# include "mozilla/StateWatching.h"
+# include "nsCOMPtr.h"
+# include "nsIRunnable.h"
+# include "nsISupports.h"
+# include "nsTArray.h"
+# include "nsThreadUtils.h"
+
+/*
+ * The state-mirroring machinery allows pieces of interesting state to be
+ * observed on multiple thread without locking. The basic strategy is to track
+ * changes in a canonical value and post updates to other threads that hold
+ * mirrors for that value.
+ *
+ * One problem with the naive implementation of such a system is that some
+ * pieces of state need to be updated atomically, and certain other operations
+ * need to wait for these atomic updates to complete before executing. The
+ * state-mirroring machinery solves this problem by requiring that its owner
+ * thread uses tail dispatch, and posting state update events (which should
+ * always be run first by TaskDispatcher implementations) to that tail
+ * dispatcher. This ensures that state changes are always atomic from the
+ * perspective of observing threads.
+ *
+ * Given that state-mirroring is an automatic background process, we try to
+ * avoid burdening the caller with worrying too much about teardown. To that
+ * end, we don't assert dispatch success for any of the notifications, and
+ * assume that any canonical or mirror owned by a thread for whom dispatch fails
+ * will soon be disconnected by its holder anyway.
+ *
+ * Given that semantics may change and comments tend to go out of date, we
+ * deliberately don't provide usage examples here. Grep around to find them.
+ */
+
+namespace mozilla {
+
+// Mirror<T> and Canonical<T> inherit WatchTarget, so we piggy-back on the
+// logging that WatchTarget already does. Given that, it makes sense to share
+// the same log module.
+# define MIRROR_LOG(x, ...) \
+ MOZ_ASSERT(gStateWatchingLog); \
+ MOZ_LOG(gStateWatchingLog, LogLevel::Debug, (x, ##__VA_ARGS__))
+
+template <typename T>
+class AbstractMirror;
+
+/*
+ * AbstractCanonical is a superclass from which all Canonical values must
+ * inherit. It serves as the interface of operations which may be performed (via
+ * asynchronous dispatch) by other threads, in particular by the corresponding
+ * Mirror value.
+ */
+template <typename T>
+class AbstractCanonical {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractCanonical)
+ AbstractCanonical(AbstractThread* aThread) : mOwnerThread(aThread) {}
+ virtual void AddMirror(AbstractMirror<T>* aMirror) = 0;
+ virtual void RemoveMirror(AbstractMirror<T>* aMirror) = 0;
+
+ AbstractThread* OwnerThread() const { return mOwnerThread; }
+
+ protected:
+ virtual ~AbstractCanonical() {}
+ RefPtr<AbstractThread> mOwnerThread;
+};
+
+/*
+ * AbstractMirror is a superclass from which all Mirror values must
+ * inherit. It serves as the interface of operations which may be performed (via
+ * asynchronous dispatch) by other threads, in particular by the corresponding
+ * Canonical value.
+ */
+template <typename T>
+class AbstractMirror {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractMirror)
+ AbstractMirror(AbstractThread* aThread) : mOwnerThread(aThread) {}
+ virtual void UpdateValue(const T& aNewValue) = 0;
+ virtual void NotifyDisconnected() = 0;
+
+ AbstractThread* OwnerThread() const { return mOwnerThread; }
+
+ protected:
+ virtual ~AbstractMirror() {}
+ RefPtr<AbstractThread> mOwnerThread;
+};
+
+/*
+ * Canonical<T> is a wrapper class that allows a given value to be mirrored by
+ * other threads. It maintains a list of active mirrors, and queues updates for
+ * them when the internal value changes. When changing the value, the caller
+ * needs to pass a TaskDispatcher object, which fires the updates at the
+ * appropriate time. Canonical<T> is also a WatchTarget, and may be set up to
+ * trigger other routines (on the same thread) when the canonical value changes.
+ *
+ * Canonical<T> is intended to be used as a member variable, so it doesn't
+ * actually inherit AbstractCanonical<T> (a refcounted type). Rather, it
+ * contains an inner class called |Impl| that implements most of the interesting
+ * logic.
+ */
+template <typename T>
+class Canonical {
+ public:
+ Canonical(AbstractThread* aThread, const T& aInitialValue,
+ const char* aName) {
+ mImpl = new Impl(aThread, aInitialValue, aName);
+ }
+
+ ~Canonical() {}
+
+ private:
+ class Impl : public AbstractCanonical<T>, public WatchTarget {
+ public:
+ using AbstractCanonical<T>::OwnerThread;
+
+ Impl(AbstractThread* aThread, const T& aInitialValue, const char* aName)
+ : AbstractCanonical<T>(aThread),
+ WatchTarget(aName),
+ mValue(aInitialValue) {
+ MIRROR_LOG("%s [%p] initialized", mName, this);
+ MOZ_ASSERT(aThread->SupportsTailDispatch(),
+ "Can't get coherency without tail dispatch");
+ }
+
+ void AddMirror(AbstractMirror<T>* aMirror) override {
+ MIRROR_LOG("%s [%p] adding mirror %p", mName, this, aMirror);
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(!mMirrors.Contains(aMirror));
+ mMirrors.AppendElement(aMirror);
+ aMirror->OwnerThread()->DispatchStateChange(MakeNotifier(aMirror));
+ }
+
+ void RemoveMirror(AbstractMirror<T>* aMirror) override {
+ MIRROR_LOG("%s [%p] removing mirror %p", mName, this, aMirror);
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(mMirrors.Contains(aMirror));
+ mMirrors.RemoveElement(aMirror);
+ }
+
+ void DisconnectAll() {
+ MIRROR_LOG("%s [%p] Disconnecting all mirrors", mName, this);
+ for (size_t i = 0; i < mMirrors.Length(); ++i) {
+ mMirrors[i]->OwnerThread()->Dispatch(
+ NewRunnableMethod("AbstractMirror::NotifyDisconnected", mMirrors[i],
+ &AbstractMirror<T>::NotifyDisconnected));
+ }
+ mMirrors.Clear();
+ }
+
+ operator const T&() {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ return mValue;
+ }
+
+ void Set(const T& aNewValue) {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+
+ if (aNewValue == mValue) {
+ return;
+ }
+
+ // Notify same-thread watchers. The state watching machinery will make
+ // sure that notifications run at the right time.
+ NotifyWatchers();
+
+ // Check if we've already got a pending update. If so we won't schedule
+ // another one.
+ bool alreadyNotifying = mInitialValue.isSome();
+
+ // Stash the initial value if needed, then update to the new value.
+ if (mInitialValue.isNothing()) {
+ mInitialValue.emplace(mValue);
+ }
+ mValue = aNewValue;
+
+      // We wait until things have stabilized before sending state updates so
+ // that we can avoid sending multiple updates, and possibly avoid sending
+ // any updates at all if the value ends up where it started.
+ if (!alreadyNotifying) {
+ AbstractThread::DispatchDirectTask(NewRunnableMethod(
+ "Canonical::Impl::DoNotify", this, &Impl::DoNotify));
+ }
+ }
+
+ Impl& operator=(const T& aNewValue) {
+ Set(aNewValue);
+ return *this;
+ }
+ Impl& operator=(const Impl& aOther) {
+ Set(aOther);
+ return *this;
+ }
+ Impl(const Impl& aOther) = delete;
+
+ protected:
+ ~Impl() { MOZ_DIAGNOSTIC_ASSERT(mMirrors.IsEmpty()); }
+
+ private:
+ void DoNotify() {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(mInitialValue.isSome());
+ bool same = mInitialValue.ref() == mValue;
+ mInitialValue.reset();
+
+ if (same) {
+ MIRROR_LOG("%s [%p] unchanged - not sending update", mName, this);
+ return;
+ }
+
+ for (size_t i = 0; i < mMirrors.Length(); ++i) {
+ mMirrors[i]->OwnerThread()->DispatchStateChange(
+ MakeNotifier(mMirrors[i]));
+ }
+ }
+
+ already_AddRefed<nsIRunnable> MakeNotifier(AbstractMirror<T>* aMirror) {
+ return NewRunnableMethod<T>("AbstractMirror::UpdateValue", aMirror,
+ &AbstractMirror<T>::UpdateValue, mValue);
+ }
+
+ T mValue;
+ Maybe<T> mInitialValue;
+ nsTArray<RefPtr<AbstractMirror<T>>> mMirrors;
+ };
+
+ public:
+ // NB: Because mirror-initiated disconnection can race with canonical-
+ // initiated disconnection, a canonical should never be reinitialized.
+ // Forward control operations to the Impl.
+ void DisconnectAll() { return mImpl->DisconnectAll(); }
+
+ // Access to the Impl.
+ operator Impl&() { return *mImpl; }
+ Impl* operator&() { return mImpl; }
+
+ // Access to the T.
+ const T& Ref() const { return *mImpl; }
+ operator const T&() const { return Ref(); }
+ void Set(const T& aNewValue) { mImpl->Set(aNewValue); }
+ Canonical& operator=(const T& aNewValue) {
+ Set(aNewValue);
+ return *this;
+ }
+ Canonical& operator=(const Canonical& aOther) {
+ Set(aOther);
+ return *this;
+ }
+ Canonical(const Canonical& aOther) = delete;
+
+ private:
+ RefPtr<Impl> mImpl;
+};
+
+/*
+ * Mirror<T> is a wrapper class that allows a given value to mirror that of a
+ * Canonical<T> owned by another thread. It registers itself with a
+ * Canonical<T>, and is periodically updated with new values. Mirror<T> is also
+ * a WatchTarget, and may be set up to trigger other routines (on the same
+ * thread) when the mirrored value changes.
+ *
+ * Mirror<T> is intended to be used as a member variable, so it doesn't actually
+ * inherit AbstractMirror<T> (a refcounted type). Rather, it contains an inner
+ * class called |Impl| that implements most of the interesting logic.
+ */
+template <typename T>
+class Mirror {
+ public:
+ Mirror(AbstractThread* aThread, const T& aInitialValue, const char* aName) {
+ mImpl = new Impl(aThread, aInitialValue, aName);
+ }
+
+ ~Mirror() {
+ // As a member of complex objects, a Mirror<T> may be destroyed on a
+ // different thread than its owner, or late in shutdown during CC. Given
+ // that, we require manual disconnection so that callers can put things in
+ // the right place.
+ MOZ_DIAGNOSTIC_ASSERT(!mImpl->IsConnected());
+ }
+
+ private:
+ class Impl : public AbstractMirror<T>, public WatchTarget {
+ public:
+ using AbstractMirror<T>::OwnerThread;
+
+ Impl(AbstractThread* aThread, const T& aInitialValue, const char* aName)
+ : AbstractMirror<T>(aThread),
+ WatchTarget(aName),
+ mValue(aInitialValue) {
+ MIRROR_LOG("%s [%p] initialized", mName, this);
+ MOZ_ASSERT(aThread->SupportsTailDispatch(),
+ "Can't get coherency without tail dispatch");
+ }
+
+ operator const T&() {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ return mValue;
+ }
+
+ virtual void UpdateValue(const T& aNewValue) override {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ if (mValue != aNewValue) {
+ mValue = aNewValue;
+ WatchTarget::NotifyWatchers();
+ }
+ }
+
+ virtual void NotifyDisconnected() override {
+      MIRROR_LOG("%s [%p] Notified of disconnection from %p", mName, this,
+ mCanonical.get());
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ mCanonical = nullptr;
+ }
+
+ bool IsConnected() const { return !!mCanonical; }
+
+ void Connect(AbstractCanonical<T>* aCanonical) {
+ MIRROR_LOG("%s [%p] Connecting to %p", mName, this, aCanonical);
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(!IsConnected());
+ MOZ_ASSERT(OwnerThread()->RequiresTailDispatch(aCanonical->OwnerThread()),
+ "Can't get coherency without tail dispatch");
+
+ nsCOMPtr<nsIRunnable> r =
+ NewRunnableMethod<StoreRefPtrPassByPtr<AbstractMirror<T>>>(
+ "AbstractCanonical::AddMirror", aCanonical,
+ &AbstractCanonical<T>::AddMirror, this);
+ aCanonical->OwnerThread()->Dispatch(r.forget());
+ mCanonical = aCanonical;
+ }
+
+ public:
+ void DisconnectIfConnected() {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ if (!IsConnected()) {
+ return;
+ }
+
+ MIRROR_LOG("%s [%p] Disconnecting from %p", mName, this,
+ mCanonical.get());
+ nsCOMPtr<nsIRunnable> r =
+ NewRunnableMethod<StoreRefPtrPassByPtr<AbstractMirror<T>>>(
+ "AbstractCanonical::RemoveMirror", mCanonical,
+ &AbstractCanonical<T>::RemoveMirror, this);
+ mCanonical->OwnerThread()->Dispatch(r.forget());
+ mCanonical = nullptr;
+ }
+
+ protected:
+ ~Impl() { MOZ_DIAGNOSTIC_ASSERT(!IsConnected()); }
+
+ private:
+ T mValue;
+ RefPtr<AbstractCanonical<T>> mCanonical;
+ };
+
+ public:
+ // Forward control operations to the Impl<T>.
+ void Connect(AbstractCanonical<T>* aCanonical) { mImpl->Connect(aCanonical); }
+ void DisconnectIfConnected() { mImpl->DisconnectIfConnected(); }
+
+ // Access to the Impl<T>.
+ operator Impl&() { return *mImpl; }
+ Impl* operator&() { return mImpl; }
+
+ // Access to the T.
+ const T& Ref() const { return *mImpl; }
+ operator const T&() const { return Ref(); }
+
+ private:
+ RefPtr<Impl> mImpl;
+};
+
+# undef MIRROR_LOG
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/StateWatching.h b/xpcom/threads/StateWatching.h
new file mode 100644
index 0000000000..3da0c63bfe
--- /dev/null
+++ b/xpcom/threads/StateWatching.h
@@ -0,0 +1,302 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(StateWatching_h_)
+# define StateWatching_h_
+
+# include <cstddef>
+# include <new>
+# include <utility>
+# include "mozilla/AbstractThread.h"
+# include "mozilla/Assertions.h"
+# include "mozilla/Logging.h"
+# include "mozilla/RefPtr.h"
+# include "nsISupports.h"
+# include "nsTArray.h"
+# include "nsThreadUtils.h"
+
+/*
+ * The state-watching machinery automates the process of responding to changes
+ * in various pieces of state.
+ *
+ * A standard programming pattern is as follows:
+ *
+ * mFoo = ...;
+ * NotifyStuffChanged();
+ * ...
+ * mBar = ...;
+ * NotifyStuffChanged();
+ *
+ * This pattern is error-prone and difficult to audit because it requires the
+ * programmer to manually trigger the update routine. This can be especially
+ * problematic when the update routine depends on numerous pieces of state, and
+ * when that state is modified across a variety of helper methods. In these
+ * cases the responsibility for invoking the routine is often unclear, causing
+ * developers to scatter calls to it like pixie dust. This can result in
+ * duplicate invocations (which is wasteful) and missing invocations in corner-
+ * cases (which is a source of bugs).
+ *
+ * This file provides a set of primitives that automatically handle updates and
+ * allow the programmers to explicitly construct a graph of state dependencies.
+ * When used correctly, it eliminates the guess-work and wasted cycles described
+ * above.
+ *
+ * There are two basic pieces:
+ * (1) Objects that can be watched for updates. These inherit WatchTarget.
+ * (2) Objects that receive objects and trigger processing. These inherit
+ * AbstractWatcher. In the current machinery, these exist only internally
+ * within the WatchManager, though that could change.
+ *
+ * Note that none of this machinery is thread-safe - it must all happen on the
+ * same owning thread. To solve multi-threaded use-cases, use state mirroring
+ * and watch the mirrored value.
+ *
+ * Given that semantics may change and comments tend to go out of date, we
+ * deliberately don't provide usage examples here. Grep around to find them.
+ */
+
+namespace mozilla {
+
+extern LazyLogModule gStateWatchingLog;
+
+# define WATCH_LOG(x, ...) \
+ MOZ_LOG(gStateWatchingLog, LogLevel::Debug, (x, ##__VA_ARGS__))
+
+/*
+ * AbstractWatcher is a superclass from which all watchers must inherit.
+ */
+class AbstractWatcher {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractWatcher)
+ AbstractWatcher() : mDestroyed(false) {}
+ bool IsDestroyed() { return mDestroyed; }
+ virtual void Notify() = 0;
+
+ protected:
+ virtual ~AbstractWatcher() { MOZ_ASSERT(mDestroyed); }
+ bool mDestroyed;
+};
+
+/*
+ * WatchTarget is a superclass from which all watchable things must inherit.
+ * Unlike AbstractWatcher, it is a fully-implemented mix-in, and the subclass
+ * needs only to invoke NotifyWatchers when something changes.
+ *
+ * The functionality that this class provides is not threadsafe, and should only
+ * be used on the thread that owns that WatchTarget.
+ */
+class WatchTarget {
+ public:
+ explicit WatchTarget(const char* aName) : mName(aName) {}
+
+ void AddWatcher(AbstractWatcher* aWatcher) {
+ MOZ_ASSERT(!mWatchers.Contains(aWatcher));
+ mWatchers.AppendElement(aWatcher);
+ }
+
+ void RemoveWatcher(AbstractWatcher* aWatcher) {
+ MOZ_ASSERT(mWatchers.Contains(aWatcher));
+ mWatchers.RemoveElement(aWatcher);
+ }
+
+ protected:
+ void NotifyWatchers() {
+ WATCH_LOG("%s[%p] notifying watchers\n", mName, this);
+ PruneWatchers();
+ for (size_t i = 0; i < mWatchers.Length(); ++i) {
+ mWatchers[i]->Notify();
+ }
+ }
+
+ private:
+ // We don't have Watchers explicitly unregister themselves when they die,
+ // because then they'd need back-references to all the WatchTargets they're
+ // subscribed to, and WatchTargets aren't reference-counted. So instead we
+ // just prune dead ones at appropriate times, which works just fine.
+ void PruneWatchers() {
+ mWatchers.RemoveElementsBy(
+ [](const auto& watcher) { return watcher->IsDestroyed(); });
+ }
+
+ nsTArray<RefPtr<AbstractWatcher>> mWatchers;
+
+ protected:
+ const char* mName;
+};
+
+/*
+ * Watchable is a wrapper class that turns any primitive into a WatchTarget.
+ */
+template <typename T>
+class Watchable : public WatchTarget {
+ public:
+ Watchable(const T& aInitialValue, const char* aName)
+ : WatchTarget(aName), mValue(aInitialValue) {}
+
+ const T& Ref() const { return mValue; }
+ operator const T&() const { return Ref(); }
+ Watchable& operator=(const T& aNewValue) {
+ if (aNewValue != mValue) {
+ mValue = aNewValue;
+ NotifyWatchers();
+ }
+
+ return *this;
+ }
+
+ private:
+ Watchable(const Watchable& aOther) = delete;
+ Watchable& operator=(const Watchable& aOther) = delete;
+
+ T mValue;
+};
+
+// Manager class for state-watching. Declare one of these in any class whose
+// methods should be invoked as callbacks when watched state changes.
+//
+// Internally, WatchManager maintains one AbstractWatcher per callback method.
+// Consumers invoke Watch/Unwatch on a particular (WatchTarget, Callback) tuple.
+// This causes an AbstractWatcher for |Callback| to be instantiated if it
+// doesn't already exist, and registers it with |WatchTarget|.
+//
+// Using Direct Tasks on the TailDispatcher, WatchManager ensures that we fire
+// watch callbacks no more than once per task, once all other operations for
+// that task have been completed.
+//
+// WatchManager<OwnerType> is intended to be declared as a member of |OwnerType|
+// objects. Given that, it and its owned objects can't hold permanent strong
+// refs to the owner, since that would keep the owner alive indefinitely.
+// Instead, it _only_ holds strong refs while waiting for Direct Tasks to fire.
+// This ensures that everything is kept alive just long enough.
+template <typename OwnerType>
+class WatchManager {
+ public:
+ typedef void (OwnerType::*CallbackMethod)();
+ explicit WatchManager(OwnerType* aOwner, AbstractThread* aOwnerThread)
+ : mOwner(aOwner), mOwnerThread(aOwnerThread) {}
+
+ ~WatchManager() {
+ if (!IsShutdown()) {
+ Shutdown();
+ }
+ }
+
+ bool IsShutdown() const { return !mOwner; }
+
+ // Shutdown needs to happen on mOwnerThread. If the WatchManager will be
+ // destroyed on a different thread, Shutdown() must be called manually.
+ void Shutdown() {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ for (auto& watcher : mWatchers) {
+ watcher->Destroy();
+ }
+ mWatchers.Clear();
+ mOwner = nullptr;
+ }
+
+ void Watch(WatchTarget& aTarget, CallbackMethod aMethod) {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ aTarget.AddWatcher(&EnsureWatcher(aMethod));
+ }
+
+ void Unwatch(WatchTarget& aTarget, CallbackMethod aMethod) {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ PerCallbackWatcher* watcher = GetWatcher(aMethod);
+ MOZ_ASSERT(watcher);
+ aTarget.RemoveWatcher(watcher);
+ }
+
+ void ManualNotify(CallbackMethod aMethod) {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ PerCallbackWatcher* watcher = GetWatcher(aMethod);
+ MOZ_ASSERT(watcher);
+ watcher->Notify();
+ }
+
+ private:
+ class PerCallbackWatcher : public AbstractWatcher {
+ public:
+ PerCallbackWatcher(OwnerType* aOwner, AbstractThread* aOwnerThread,
+ CallbackMethod aMethod)
+ : mOwner(aOwner),
+ mOwnerThread(aOwnerThread),
+ mCallbackMethod(aMethod) {}
+
+ void Destroy() {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ mDestroyed = true;
+ mOwner = nullptr;
+ }
+
+ void Notify() override {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ MOZ_DIAGNOSTIC_ASSERT(mOwner,
+ "mOwner is only null after destruction, "
+ "at which point we shouldn't be notified");
+ if (mNotificationPending) {
+ // We've already got a notification job in the pipe.
+ return;
+ }
+ mNotificationPending = true;
+
+ // Queue up our notification jobs to run in a stable state.
+ AbstractThread::DispatchDirectTask(
+ NS_NewRunnableFunction("WatchManager::PerCallbackWatcher::Notify",
+ [self = RefPtr<PerCallbackWatcher>(this),
+ owner = RefPtr<OwnerType>(mOwner)]() {
+ if (!self->mDestroyed) {
+ ((*owner).*(self->mCallbackMethod))();
+ }
+ self->mNotificationPending = false;
+ }));
+ }
+
+ bool CallbackMethodIs(CallbackMethod aMethod) const {
+ return mCallbackMethod == aMethod;
+ }
+
+ private:
+ ~PerCallbackWatcher() = default;
+
+    OwnerType* mOwner;  // Null only after Destroy().
+ bool mNotificationPending = false;
+ RefPtr<AbstractThread> mOwnerThread;
+ CallbackMethod mCallbackMethod;
+ };
+
+ PerCallbackWatcher* GetWatcher(CallbackMethod aMethod) {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ for (auto& watcher : mWatchers) {
+ if (watcher->CallbackMethodIs(aMethod)) {
+ return watcher;
+ }
+ }
+ return nullptr;
+ }
+
+ PerCallbackWatcher& EnsureWatcher(CallbackMethod aMethod) {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ PerCallbackWatcher* watcher = GetWatcher(aMethod);
+ if (watcher) {
+ return *watcher;
+ }
+ watcher = mWatchers
+ .AppendElement(MakeAndAddRef<PerCallbackWatcher>(
+ mOwner, mOwnerThread, aMethod))
+ ->get();
+ return *watcher;
+ }
+
+ nsTArray<RefPtr<PerCallbackWatcher>> mWatchers;
+ OwnerType* mOwner;
+ RefPtr<AbstractThread> mOwnerThread;
+};
+
+# undef WATCH_LOG
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/SyncRunnable.h b/xpcom/threads/SyncRunnable.h
new file mode 100644
index 0000000000..77f82ba313
--- /dev/null
+++ b/xpcom/threads/SyncRunnable.h
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_SyncRunnable_h
+#define mozilla_SyncRunnable_h
+
+#include <utility>
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/dom/JSExecutionManager.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+/**
+ * This class will wrap a nsIRunnable and dispatch it to the target thread
+ * synchronously. This is different from
+ * NS_DispatchAndSpinEventLoopUntilComplete: this class does not spin the event
+ * loop while waiting for the event to complete. This means that you don't risk
+ * reentrance from pending messages, but you must be sure that the target thread
+ * does not ever block on this thread, or else you will deadlock.
+ *
+ * Typical usage:
+ * RefPtr<SyncRunnable> sr = new SyncRunnable(new myrunnable...());
+ * sr->DispatchToThread(t);
+ *
+ * We also provide convenience wrappers:
+ * SyncRunnable::DispatchToThread(pThread, new myrunnable...());
+ * SyncRunnable::DispatchToThread(pThread, NS_NewRunnableFunction(...));
+ *
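+ * For example (a sketch; aTarget is an nsIEventTarget owned by the caller,
+ * and InitState() is a hypothetical function to run there):
+ *
+ *   SyncRunnable::DispatchToThread(
+ *       aTarget, NS_NewRunnableFunction("InitOnTarget", [] { InitState(); }));
+ *
+ * The call returns only once the runnable has finished on aTarget.
+ *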
+ */
+class SyncRunnable : public Runnable {
+ public:
+ explicit SyncRunnable(nsIRunnable* aRunnable)
+ : Runnable("SyncRunnable"),
+ mRunnable(aRunnable),
+ mMonitor("SyncRunnable"),
+ mDone(false) {}
+
+ explicit SyncRunnable(already_AddRefed<nsIRunnable> aRunnable)
+ : Runnable("SyncRunnable"),
+ mRunnable(std::move(aRunnable)),
+ mMonitor("SyncRunnable"),
+ mDone(false) {}
+
+ nsresult DispatchToThread(nsIEventTarget* aThread,
+ bool aForceDispatch = false) {
+ nsresult rv;
+ bool on;
+
+ if (!aForceDispatch) {
+ rv = aThread->IsOnCurrentThread(&on);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ if (NS_SUCCEEDED(rv) && on) {
+ mRunnable->Run();
+ return NS_OK;
+ }
+ }
+
+ rv = aThread->Dispatch(this, NS_DISPATCH_NORMAL);
+ if (NS_SUCCEEDED(rv)) {
+ mozilla::MonitorAutoLock lock(mMonitor);
+ // This could be synchronously dispatching to a thread currently waiting
+ // for JS execution clearance. Yield JS execution.
+ dom::AutoYieldJSThreadExecution yield;
+
+ while (!mDone) {
+ lock.Wait();
+ }
+ }
+ return rv;
+ }
+
+ nsresult DispatchToThread(AbstractThread* aThread,
+ bool aForceDispatch = false) {
+ if (!aForceDispatch && aThread->IsCurrentThreadIn()) {
+ mRunnable->Run();
+ return NS_OK;
+ }
+
+    // Check that we don't have tail dispatching here. Otherwise we will
+    // deadlock ourselves when waiting in the loop below.
+ MOZ_ASSERT(!aThread->RequiresTailDispatchFromCurrentThread());
+
+ nsresult rv = aThread->Dispatch(RefPtr<nsIRunnable>(this).forget());
+ if (NS_SUCCEEDED(rv)) {
+ mozilla::MonitorAutoLock lock(mMonitor);
+ while (!mDone) {
+ lock.Wait();
+ }
+ }
+ return rv;
+ }
+
+ static nsresult DispatchToThread(nsIEventTarget* aThread,
+ nsIRunnable* aRunnable,
+ bool aForceDispatch = false) {
+ RefPtr<SyncRunnable> s(new SyncRunnable(aRunnable));
+ return s->DispatchToThread(aThread, aForceDispatch);
+ }
+
+ static nsresult DispatchToThread(AbstractThread* aThread,
+ nsIRunnable* aRunnable,
+ bool aForceDispatch = false) {
+ RefPtr<SyncRunnable> s(new SyncRunnable(aRunnable));
+ return s->DispatchToThread(aThread, aForceDispatch);
+ }
+
+ static nsresult DispatchToThread(nsIEventTarget* aThread,
+ already_AddRefed<nsIRunnable> aRunnable,
+ bool aForceDispatch = false) {
+ RefPtr<SyncRunnable> s(new SyncRunnable(std::move(aRunnable)));
+ return s->DispatchToThread(aThread, aForceDispatch);
+ }
+
+ static nsresult DispatchToThread(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable,
+ bool aForceDispatch = false) {
+ RefPtr<SyncRunnable> s(new SyncRunnable(std::move(aRunnable)));
+ return s->DispatchToThread(aThread, aForceDispatch);
+ }
+
+ // These deleted overloads prevent accidentally (if harmlessly) double-
+ // wrapping SyncRunnable, which was previously a common anti-pattern.
+ static nsresult DispatchToThread(nsIEventTarget* aThread,
+ SyncRunnable* aRunnable,
+ bool aForceDispatch = false) = delete;
+ static nsresult DispatchToThread(AbstractThread* aThread,
+ SyncRunnable* aRunnable,
+ bool aForceDispatch = false) = delete;
+
+ protected:
+ NS_IMETHOD Run() override {
+ mRunnable->Run();
+
+ mozilla::MonitorAutoLock lock(mMonitor);
+ MOZ_ASSERT(!mDone);
+
+ mDone = true;
+ mMonitor.Notify();
+
+ return NS_OK;
+ }
+
+ private:
+ nsCOMPtr<nsIRunnable> mRunnable;
+ mozilla::Monitor mMonitor;
+ bool mDone MOZ_GUARDED_BY(mMonitor);
+};
+
+} // namespace mozilla
+
+#endif // mozilla_SyncRunnable_h
diff --git a/xpcom/threads/SynchronizedEventQueue.cpp b/xpcom/threads/SynchronizedEventQueue.cpp
new file mode 100644
index 0000000000..59161b7f9d
--- /dev/null
+++ b/xpcom/threads/SynchronizedEventQueue.cpp
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SynchronizedEventQueue.h"
+#include "nsIThreadInternal.h"
+
+using namespace mozilla;
+
+void SynchronizedEventQueue::AddObserver(nsIThreadObserver* aObserver) {
+ MOZ_ASSERT(aObserver);
+ MOZ_ASSERT(!mEventObservers.Contains(aObserver));
+ mEventObservers.AppendElement(aObserver);
+}
+
+void SynchronizedEventQueue::RemoveObserver(nsIThreadObserver* aObserver) {
+ MOZ_ASSERT(aObserver);
+ MOZ_ALWAYS_TRUE(mEventObservers.RemoveElement(aObserver));
+}
+
+const nsTObserverArray<nsCOMPtr<nsIThreadObserver>>&
+SynchronizedEventQueue::EventObservers() {
+ return mEventObservers;
+}
diff --git a/xpcom/threads/SynchronizedEventQueue.h b/xpcom/threads/SynchronizedEventQueue.h
new file mode 100644
index 0000000000..e4cf1a62af
--- /dev/null
+++ b/xpcom/threads/SynchronizedEventQueue.h
@@ -0,0 +1,131 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_SynchronizedEventQueue_h
+#define mozilla_SynchronizedEventQueue_h
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Mutex.h"
+#include "nsIThreadInternal.h"
+#include "nsCOMPtr.h"
+#include "nsTObserverArray.h"
+
+class nsIEventTarget;
+class nsISerialEventTarget;
+class nsIThreadObserver;
+
+namespace mozilla {
+
+// A SynchronizedEventQueue is an abstract class for event queues that can be
+// used across threads. A SynchronizedEventQueue implementation will typically
+// use locks and condition variables to guarantee consistency. The methods of
+// SynchronizedEventQueue are split between ThreadTargetSink (which contains
+// methods for posting events) and SynchronizedEventQueue (which contains
+// methods for getting events). This split allows event targets (specifically
+// ThreadEventTarget) to use a narrow interface, since they only need to post
+// events.
+//
+// ThreadEventQueue is the canonical implementation of
+// SynchronizedEventQueue. When Quantum DOM is implemented, we will use a
+// different synchronized queue on the main thread, SchedulerEventQueue, which
+// will handle the cooperative threading model.
+
+class ThreadTargetSink {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ThreadTargetSink)
+
+ virtual bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority) = 0;
+
+ // After this method is called, no more events can be posted.
+ virtual void Disconnect(const MutexAutoLock& aProofOfLock) = 0;
+
+ virtual nsresult RegisterShutdownTask(nsITargetShutdownTask* aTask) = 0;
+ virtual nsresult UnregisterShutdownTask(nsITargetShutdownTask* aTask) = 0;
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
+ return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ // Not const because overrides may need to take a lock
+ virtual size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) = 0;
+
+ protected:
+ virtual ~ThreadTargetSink() = default;
+};
+
+class SynchronizedEventQueue : public ThreadTargetSink {
+ public:
+ virtual already_AddRefed<nsIRunnable> GetEvent(
+ bool aMayWait, mozilla::TimeDuration* aLastEventDelay = nullptr) = 0;
+ virtual bool HasPendingEvent() = 0;
+
+ // This method atomically checks if there are pending events and, if there are
+ // none, forbids future events from being posted. It returns true if there
+ // were no pending events.
+ virtual bool ShutdownIfNoPendingEvents() = 0;
+
+ // These methods provide access to an nsIThreadObserver, whose methods are
+ // called when posting and processing events. SetObserver should only be
+ // called on the thread that processes events. GetObserver can be called from
+ // any thread. GetObserverOnThread must be used from the thread that processes
+ // events; it does not acquire a lock.
+ virtual already_AddRefed<nsIThreadObserver> GetObserver() = 0;
+ virtual already_AddRefed<nsIThreadObserver> GetObserverOnThread() = 0;
+ virtual void SetObserver(nsIThreadObserver* aObserver) = 0;
+
+ void AddObserver(nsIThreadObserver* aObserver);
+ void RemoveObserver(nsIThreadObserver* aObserver);
+ const nsTObserverArray<nsCOMPtr<nsIThreadObserver>>& EventObservers();
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) override {
+    // Normally we'd return
+    // mEventObservers.ShallowSizeOfExcludingThis(aMallocSizeOf). However,
+    // mEventObservers may be mutated on another thread, and we don't lock
+    // around access, so locking here wouldn't help. The array is small, so
+    // just report 0.
+    return 0;
+ }
+
+ /**
+ * This method causes any events currently enqueued on the thread to be
+ * suppressed until PopEventQueue is called, and any event dispatched to this
+ * thread's nsIEventTarget will queue as well. Calls to PushEventQueue may be
+ * nested and must each be paired with a call to PopEventQueue in order to
+ * restore the original state of the thread. The returned nsIEventTarget may
+ * be used to push events onto the nested queue. Dispatching will be disabled
+ * once the event queue is popped. The thread will only ever process pending
+ * events for the innermost event queue. Must only be called on the target
+ * thread.
+ */
+ virtual already_AddRefed<nsISerialEventTarget> PushEventQueue() = 0;
+
+ /**
+ * Revert a call to PushEventQueue. When an event queue is popped, any events
+ * remaining in the queue are appended to the elder queue. This also causes
+ * the nsIEventTarget returned from PushEventQueue to stop dispatching events.
+ * Must only be called on the target thread, and with the innermost event
+ * queue.
+ */
+ virtual void PopEventQueue(nsIEventTarget* aTarget) = 0;
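+
+  // Sketch of paired usage on the target thread (illustrative only; `queue`
+  // and `someRunnable` are hypothetical caller-side variables):
+  //
+  //   nsCOMPtr<nsISerialEventTarget> nested = queue->PushEventQueue();
+  //   nested->Dispatch(someRunnable.forget());  // queues on the nested queue
+  //   queue->PopEventQueue(nested);  // leftover events rejoin the elder queue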
+
+ /**
+ * Flush the list of shutdown tasks which were previously registered. After
+ * this is called, new shutdown tasks cannot be registered.
+ */
+ virtual void RunShutdownTasks() = 0;
+
+ protected:
+ virtual ~SynchronizedEventQueue() = default;
+
+ private:
+ nsTObserverArray<nsCOMPtr<nsIThreadObserver>> mEventObservers;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_SynchronizedEventQueue_h
diff --git a/xpcom/threads/TaskCategory.h b/xpcom/threads/TaskCategory.h
new file mode 100644
index 0000000000..8f189c8737
--- /dev/null
+++ b/xpcom/threads/TaskCategory.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_TaskCategory_h
+#define mozilla_TaskCategory_h
+
+namespace mozilla {
+
+// The different kinds of tasks we can dispatch to a SystemGroup, TabGroup, or
+// DocGroup.
+enum class TaskCategory {
+ // User input (clicks, keypresses, etc.)
+ UI,
+
+ // Data from the network
+ Network,
+
+ // setTimeout, setInterval
+ Timer,
+
+ // Runnables posted from a worker to the main thread
+ Worker,
+
+ // requestIdleCallback
+ IdleCallback,
+
+ // Vsync notifications
+ RefreshDriver,
+
+ // GC/CC-related tasks
+ GarbageCollection,
+
+ // Most DOM events (postMessage, media, plugins)
+ Other,
+
+ // Runnables related to Performance Counting
+ Performance,
+
+ Count
+};
+
+} // namespace mozilla
+
+#endif // mozilla_TaskCategory_h
diff --git a/xpcom/threads/TaskController.cpp b/xpcom/threads/TaskController.cpp
new file mode 100644
index 0000000000..37410bda1c
--- /dev/null
+++ b/xpcom/threads/TaskController.cpp
@@ -0,0 +1,1072 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "TaskController.h"
+#include "nsIIdleRunnable.h"
+#include "nsIRunnable.h"
+#include "nsThreadUtils.h"
+#include <algorithm>
+#include <initializer_list>
+#include "GeckoProfiler.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/BackgroundHangMonitor.h"
+#include "mozilla/InputTaskManager.h"
+#include "mozilla/VsyncTaskManager.h"
+#include "mozilla/IOInterposer.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/SchedulerGroup.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Unused.h"
+#include "nsIThreadInternal.h"
+#include "nsQueryObject.h"
+#include "nsThread.h"
+#include "prenv.h"
+#include "prsystem.h"
+
+namespace mozilla {
+
+std::unique_ptr<TaskController> TaskController::sSingleton;
+thread_local size_t mThreadPoolIndex = -1;
+std::atomic<uint64_t> Task::sCurrentTaskSeqNo = 0;
+
+const int32_t kMinimumPoolThreadCount = 2;
+const int32_t kMaximumPoolThreadCount = 8;
+
+/* static */
+int32_t TaskController::GetPoolThreadCount() {
+ if (PR_GetEnv("MOZ_TASKCONTROLLER_THREADCOUNT")) {
+ return strtol(PR_GetEnv("MOZ_TASKCONTROLLER_THREADCOUNT"), nullptr, 0);
+ }
+
+ int32_t numCores = std::max<int32_t>(1, PR_GetNumberOfProcessors());
+
+ return std::clamp<int32_t>(numCores, kMinimumPoolThreadCount,
+ kMaximumPoolThreadCount);
+}
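+
+// For example, running with MOZ_TASKCONTROLLER_THREADCOUNT=6 in the
+// environment makes GetPoolThreadCount() return 6, bypassing the clamp above.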
+
+#if defined(MOZ_COLLECTING_RUNNABLE_TELEMETRY)
+
+struct TaskMarker {
+ static constexpr Span<const char> MarkerTypeName() {
+ return MakeStringSpan("Task");
+ }
+ static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
+ const nsCString& aName, uint32_t aPriority) {
+ aWriter.StringProperty("name", aName);
+ aWriter.IntProperty("priority", aPriority);
+
+# define EVENT_PRIORITY(NAME, VALUE) \
+ if (aPriority == (VALUE)) { \
+ aWriter.StringProperty("priorityName", #NAME); \
+ } else
+ EVENT_QUEUE_PRIORITY_LIST(EVENT_PRIORITY)
+# undef EVENT_PRIORITY
+ {
+ aWriter.StringProperty("priorityName", "Invalid Value");
+ }
+ }
+ static MarkerSchema MarkerTypeDisplay() {
+ using MS = MarkerSchema;
+ MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
+ schema.SetChartLabel("{marker.data.name}");
+ schema.SetTableLabel(
+ "{marker.name} - {marker.data.name} - priority: "
+ "{marker.data.priorityName} ({marker.data.priority})");
+ schema.AddKeyLabelFormatSearchable("name", "Task Name", MS::Format::String,
+ MS::Searchable::Searchable);
+ schema.AddKeyLabelFormat("priorityName", "Priority Name",
+ MS::Format::String);
+ schema.AddKeyLabelFormat("priority", "Priority level", MS::Format::Integer);
+ return schema;
+ }
+};
+
+class MOZ_RAII AutoProfileTask {
+ public:
+ explicit AutoProfileTask(nsACString& aName, uint64_t aPriority)
+ : mName(aName), mPriority(aPriority) {
+ if (profiler_is_active()) {
+ mStartTime = TimeStamp::Now();
+ }
+ }
+
+ ~AutoProfileTask() {
+ if (!profiler_thread_is_being_profiled_for_markers()) {
+ return;
+ }
+
+ AUTO_PROFILER_LABEL("AutoProfileTask", PROFILER);
+ AUTO_PROFILER_STATS(AUTO_PROFILE_TASK);
+ profiler_add_marker("Runnable", ::mozilla::baseprofiler::category::OTHER,
+ mStartTime.IsNull()
+ ? MarkerTiming::IntervalEnd()
+ : MarkerTiming::IntervalUntilNowFrom(mStartTime),
+ TaskMarker{}, mName, mPriority);
+ }
+
+ private:
+ TimeStamp mStartTime;
+ nsAutoCString mName;
+ uint32_t mPriority;
+};
+
+# define AUTO_PROFILE_FOLLOWING_TASK(task) \
+ nsAutoCString name; \
+ (task)->GetName(name); \
+ AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING_NONSENSITIVE("Task", OTHER, name); \
+ mozilla::AutoProfileTask PROFILER_RAII(name, (task)->GetPriority());
+#else
+# define AUTO_PROFILE_FOLLOWING_TASK(task)
+#endif
+
+bool TaskManager::
+ UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
+ const MutexAutoLock& aProofOfLock, IterationType aIterationType) {
+ mCurrentSuspended = IsSuspended(aProofOfLock);
+
+ if (aIterationType == IterationType::EVENT_LOOP_TURN && !mCurrentSuspended) {
+ int32_t oldModifier = mCurrentPriorityModifier;
+ mCurrentPriorityModifier =
+ GetPriorityModifierForEventLoopTurn(aProofOfLock);
+
+ if (mCurrentPriorityModifier != oldModifier) {
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+class MOZ_RAII AutoSetMainThreadRunnableName {
+ public:
+ explicit AutoSetMainThreadRunnableName(const nsCString& aName) {
+ MOZ_ASSERT(NS_IsMainThread());
+ // We want to record our current runnable's name in a static so
+ // that BHR can record it.
+ mRestoreRunnableName = nsThread::sMainThreadRunnableName;
+
+ // Copy the name into sMainThreadRunnableName's buffer, and append a
+ // terminating null.
+ uint32_t length = std::min((uint32_t)nsThread::kRunnableNameBufSize - 1,
+ (uint32_t)aName.Length());
+ memcpy(nsThread::sMainThreadRunnableName.begin(), aName.BeginReading(),
+ length);
+ nsThread::sMainThreadRunnableName[length] = '\0';
+ }
+
+ ~AutoSetMainThreadRunnableName() {
+ nsThread::sMainThreadRunnableName = mRestoreRunnableName;
+ }
+
+ private:
+ Array<char, nsThread::kRunnableNameBufSize> mRestoreRunnableName;
+};
+#endif
+
+Task* Task::GetHighestPriorityDependency() {
+ Task* currentTask = this;
+
+ while (!currentTask->mDependencies.empty()) {
+ auto iter = currentTask->mDependencies.begin();
+
+ while (iter != currentTask->mDependencies.end()) {
+ if ((*iter)->mCompleted) {
+ auto oldIter = iter;
+ iter++;
+ // Completed tasks are removed here to prevent needlessly keeping them
+ // alive or iterating over them in the future.
+ currentTask->mDependencies.erase(oldIter);
+ continue;
+ }
+
+ currentTask = iter->get();
+ break;
+ }
+ }
+
+ return currentTask == this ? nullptr : currentTask;
+}
+
+TaskController* TaskController::Get() {
+ MOZ_ASSERT(sSingleton.get());
+ return sSingleton.get();
+}
+
+void TaskController::Initialize() {
+ MOZ_ASSERT(!sSingleton);
+ sSingleton = std::make_unique<TaskController>();
+}
+
+void ThreadFuncPoolThread(void* aIndex) {
+ mThreadPoolIndex = *reinterpret_cast<int32_t*>(aIndex);
+ delete reinterpret_cast<int32_t*>(aIndex);
+ TaskController::Get()->RunPoolThread();
+}
+
+TaskController::TaskController()
+ : mGraphMutex("TaskController::mGraphMutex"),
+ mThreadPoolCV(mGraphMutex, "TaskController::mThreadPoolCV"),
+ mMainThreadCV(mGraphMutex, "TaskController::mMainThreadCV"),
+ mRunOutOfMTTasksCounter(0) {
+ InputTaskManager::Init();
+ VsyncTaskManager::Init();
+ mMTProcessingRunnable = NS_NewRunnableFunction(
+ "TaskController::ExecutePendingMTTasks()",
+ []() { TaskController::Get()->ProcessPendingMTTask(); });
+ mMTBlockingProcessingRunnable = NS_NewRunnableFunction(
+ "TaskController::ExecutePendingMTTasks()",
+ []() { TaskController::Get()->ProcessPendingMTTask(true); });
+}
+
+// We want our default stack size limit to be approximately 2MB, to be safe for
+// JS helper tasks that can use a lot of stack, but expect most threads to use
+// much less. On Linux, however, requesting a stack of 2MB or larger risks the
+// kernel allocating an entire 2MB huge page for it on first access, which we do
+// not want. To avoid this possibility, we subtract 2 standard VM page sizes
+// from our default.
+constexpr PRUint32 sBaseStackSize = 2048 * 1024 - 2 * 4096;
+
+// TSan enforces a minimum stack size that's just slightly larger than our
+// default helper stack size. It does this to store blobs of TSan-specific data
+// on each thread's stack. Unfortunately, that means that even though we'll
+// actually receive a larger stack than we requested, the effective usable space
+// of that stack is significantly less than what we expect. To offset TSan
+// stealing our stack space from underneath us, double the default.
+//
+// Similarly, ASan requires more stack space due to red-zones.
+#if defined(MOZ_TSAN) || defined(MOZ_ASAN)
+constexpr PRUint32 sStackSize = 2 * sBaseStackSize;
+#else
+constexpr PRUint32 sStackSize = sBaseStackSize;
+#endif
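+
+// Arithmetic note: sBaseStackSize is 2048 * 1024 - 2 * 4096 = 2,088,960
+// bytes, i.e. two 4 KiB pages short of 2 MiB, which keeps the request below
+// the kernel's huge-page threshold described above.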
+
+void TaskController::InitializeThreadPool() {
+ mPoolInitializationMutex.AssertCurrentThreadOwns();
+ MOZ_ASSERT(!mThreadPoolInitialized);
+ mThreadPoolInitialized = true;
+
+ int32_t poolSize = GetPoolThreadCount();
+ for (int32_t i = 0; i < poolSize; i++) {
+ int32_t* index = new int32_t(i);
+ mPoolThreads.push_back(
+ {PR_CreateThread(PR_USER_THREAD, ThreadFuncPoolThread, index,
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_JOINABLE_THREAD, sStackSize),
+ nullptr});
+ }
+}
+
+/* static */
+size_t TaskController::GetThreadStackSize() { return sStackSize; }
+
+void TaskController::SetPerformanceCounterState(
+ PerformanceCounterState* aPerformanceCounterState) {
+ mPerformanceCounterState = aPerformanceCounterState;
+}
+
+/* static */
+void TaskController::Shutdown() {
+ InputTaskManager::Cleanup();
+ VsyncTaskManager::Cleanup();
+ if (sSingleton) {
+ sSingleton->ShutdownThreadPoolInternal();
+ sSingleton->ShutdownInternal();
+ }
+ MOZ_ASSERT(!sSingleton);
+}
+
+void TaskController::ShutdownThreadPoolInternal() {
+ {
+ // Prevent a race condition on mShuttingDown and wait.
+ MutexAutoLock lock(mGraphMutex);
+
+ mShuttingDown = true;
+ mThreadPoolCV.NotifyAll();
+ }
+ for (PoolThread& thread : mPoolThreads) {
+ PR_JoinThread(thread.mThread);
+ }
+}
+
+void TaskController::ShutdownInternal() { sSingleton = nullptr; }
+
+void TaskController::RunPoolThread() {
+ IOInterposer::RegisterCurrentThread();
+
+ // This is used to hold on to a task to make sure it is released outside the
+ // lock. This is required since it's perfectly feasible for task destructors
+ // to post events themselves.
+ RefPtr<Task> lastTask;
+
+ nsAutoCString threadName;
+ threadName.AppendLiteral("TaskController #");
+ threadName.AppendInt(static_cast<int64_t>(mThreadPoolIndex));
+ AUTO_PROFILER_REGISTER_THREAD(threadName.BeginReading());
+
+ MutexAutoLock lock(mGraphMutex);
+ while (true) {
+ bool ranTask = false;
+
+ if (!mThreadableTasks.empty()) {
+ for (auto iter = mThreadableTasks.begin(); iter != mThreadableTasks.end();
+ ++iter) {
+ // Search for the highest priority dependency of the highest priority
+ // task.
+
+ // We work with rawptrs to avoid needless refcounting. All our tasks
+ // are always kept alive by the graph. If one is removed from the graph
+ // it is kept alive by mPoolThreads[mThreadPoolIndex].mCurrentTask.
+ Task* task = iter->get();
+
+ MOZ_ASSERT(!task->mTaskManager);
+
+ mPoolThreads[mThreadPoolIndex].mEffectiveTaskPriority =
+ task->GetPriority();
+
+ Task* nextTask;
+ while ((nextTask = task->GetHighestPriorityDependency())) {
+ task = nextTask;
+ }
+
+ if (task->IsMainThreadOnly() || task->mInProgress) {
+ continue;
+ }
+
+ mPoolThreads[mThreadPoolIndex].mCurrentTask = task;
+ mThreadableTasks.erase(task->mIterator);
+ task->mIterator = mThreadableTasks.end();
+ task->mInProgress = true;
+
+ if (!mThreadableTasks.empty()) {
+ // Ensure at least one additional thread is woken up if there are
+ // more threadable tasks to process. Notifying all threads at once
+ // isn't actually better for performance since they all need the
+ // GraphMutex to proceed anyway.
+ mThreadPoolCV.Notify();
+ }
+
+ bool taskCompleted = false;
+ {
+ MutexAutoUnlock unlock(mGraphMutex);
+ lastTask = nullptr;
+ AUTO_PROFILE_FOLLOWING_TASK(task);
+ taskCompleted = task->Run();
+ ranTask = true;
+ }
+
+ task->mInProgress = false;
+
+ if (!taskCompleted) {
+ // Presumably this task was interrupted; leave its dependencies
+ // unresolved and reinsert it into the queue.
+ auto insertion = mThreadableTasks.insert(
+ mPoolThreads[mThreadPoolIndex].mCurrentTask);
+ MOZ_ASSERT(insertion.second);
+ task->mIterator = insertion.first;
+ } else {
+ task->mCompleted = true;
+#ifdef DEBUG
+ task->mIsInGraph = false;
+#endif
+ task->mDependencies.clear();
+ // This may have unblocked a main thread task. We could do this only
+ // if there was a main thread task before this one in the dependency
+ // chain.
+ mMayHaveMainThreadTask = true;
+ // Since this task could have multiple dependencies that are restricted
+ // to the main thread, let's make sure that thread is awake.
+ EnsureMainThreadTasksScheduled();
+
+ MaybeInterruptTask(GetHighestPriorityMTTask());
+ }
+
+ // Store last task for release next time we release the lock or enter
+ // wait state.
+ lastTask = mPoolThreads[mThreadPoolIndex].mCurrentTask.forget();
+ break;
+ }
+ }
+
+ // Ensure the last task is released before we enter the wait state.
+ if (lastTask) {
+ MutexAutoUnlock unlock(mGraphMutex);
+ lastTask = nullptr;
+
+ // Run another loop iteration, while we were unlocked there was an
+ // opportunity for another task to be posted or shutdown to be initiated.
+ continue;
+ }
+
+ if (!ranTask) {
+ if (mShuttingDown) {
+ IOInterposer::UnregisterCurrentThread();
+ MOZ_ASSERT(mThreadableTasks.empty());
+ return;
+ }
+
+ AUTO_PROFILER_LABEL("TaskController::RunPoolThread", IDLE);
+ mThreadPoolCV.Wait();
+ }
+ }
+}
+
+void TaskController::AddTask(already_AddRefed<Task>&& aTask) {
+ RefPtr<Task> task(aTask);
+
+ if (!task->IsMainThreadOnly()) {
+ MutexAutoLock lock(mPoolInitializationMutex);
+ if (!mThreadPoolInitialized) {
+ InitializeThreadPool();
+ }
+ }
+
+ MutexAutoLock lock(mGraphMutex);
+
+ if (TaskManager* manager = task->GetManager()) {
+ if (manager->mTaskCount == 0) {
+ mTaskManagers.insert(manager);
+ }
+ manager->DidQueueTask();
+
+ // Set this here since if this manager's priority modifier doesn't change
+ // we will not reprioritize when iterating over the queue.
+ task->mPriorityModifier = manager->mCurrentPriorityModifier;
+ }
+
+ if (profiler_is_active_and_unpaused()) {
+ task->mInsertionTime = TimeStamp::Now();
+ }
+
+#ifdef DEBUG
+ task->mIsInGraph = true;
+
+ for (const RefPtr<Task>& otherTask : task->mDependencies) {
+ MOZ_ASSERT(!otherTask->mTaskManager ||
+ otherTask->mTaskManager == task->mTaskManager);
+ }
+#endif
+
+ LogTask::LogDispatch(task);
+
+ std::pair<std::set<RefPtr<Task>, Task::PriorityCompare>::iterator, bool>
+ insertion;
+ if (task->IsMainThreadOnly()) {
+ insertion = mMainThreadTasks.insert(std::move(task));
+ } else {
+ insertion = mThreadableTasks.insert(std::move(task));
+ }
+ (*insertion.first)->mIterator = insertion.first;
+ MOZ_ASSERT(insertion.second);
+
+ MaybeInterruptTask(*insertion.first);
+}
+
+void TaskController::WaitForTaskOrMessage() {
+ MutexAutoLock lock(mGraphMutex);
+ while (!mMayHaveMainThreadTask) {
+ AUTO_PROFILER_LABEL("TaskController::WaitForTaskOrMessage", IDLE);
+ mMainThreadCV.Wait();
+ }
+}
+
+void TaskController::ExecuteNextTaskOnlyMainThread() {
+ MOZ_ASSERT(NS_IsMainThread());
+ MutexAutoLock lock(mGraphMutex);
+ ExecuteNextTaskOnlyMainThreadInternal(lock);
+}
+
+void TaskController::ProcessPendingMTTask(bool aMayWait) {
+ MOZ_ASSERT(NS_IsMainThread());
+ MutexAutoLock lock(mGraphMutex);
+
+ for (;;) {
+ // We only ever process one event here. However we may sometimes
+ // not actually process a real event because of suspended tasks.
+ // This loop allows us to wait until we've processed something
+ // in that scenario.
+
+ mMTTaskRunnableProcessedTask = ExecuteNextTaskOnlyMainThreadInternal(lock);
+
+ if (mMTTaskRunnableProcessedTask || !aMayWait) {
+ break;
+ }
+
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ // Unlock before calling into the BackgroundHangMonitor API as it uses
+ // the timer API.
+ {
+ MutexAutoUnlock unlock(mGraphMutex);
+ BackgroundHangMonitor().NotifyWait();
+ }
+#endif
+
+ {
+ // ProcessNextEvent will also have attempted to wait; however, we may have
+ // given it a Runnable when all the tasks in our task graph were suspended
+ // but we weren't able to cheaply determine that.
+ AUTO_PROFILER_LABEL("TaskController::ProcessPendingMTTask", IDLE);
+ mMainThreadCV.Wait();
+ }
+
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ {
+ MutexAutoUnlock unlock(mGraphMutex);
+ BackgroundHangMonitor().NotifyActivity();
+ }
+#endif
+ }
+
+ if (mMayHaveMainThreadTask) {
+ EnsureMainThreadTasksScheduled();
+ }
+}
+
+void TaskController::ReprioritizeTask(Task* aTask, uint32_t aPriority) {
+ MutexAutoLock lock(mGraphMutex);
+ std::set<RefPtr<Task>, Task::PriorityCompare>* queue = &mMainThreadTasks;
+ if (!aTask->IsMainThreadOnly()) {
+ queue = &mThreadableTasks;
+ }
+
+ MOZ_ASSERT(aTask->mIterator != queue->end());
+ queue->erase(aTask->mIterator);
+
+ aTask->mPriority = aPriority;
+
+ auto insertion = queue->insert(aTask);
+ MOZ_ASSERT(insertion.second);
+ aTask->mIterator = insertion.first;
+
+ MaybeInterruptTask(aTask);
+}
+
+// Code supporting runnable compatibility.
+// Task that wraps a runnable.
+class RunnableTask : public Task {
+ public:
+ RunnableTask(already_AddRefed<nsIRunnable>&& aRunnable, int32_t aPriority,
+ bool aMainThread = true)
+ : Task(aMainThread, aPriority), mRunnable(aRunnable) {}
+
+ virtual bool Run() override {
+ mRunnable->Run();
+ mRunnable = nullptr;
+ return true;
+ }
+
+ void SetIdleDeadline(TimeStamp aDeadline) override {
+ nsCOMPtr<nsIIdleRunnable> idleRunnable = do_QueryInterface(mRunnable);
+ if (idleRunnable) {
+ idleRunnable->SetDeadline(aDeadline);
+ }
+ }
+
+ PerformanceCounter* GetPerformanceCounter() const override {
+ return nsThread::GetPerformanceCounterBase(mRunnable);
+ }
+
+ virtual bool GetName(nsACString& aName) override {
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ nsThread::GetLabeledRunnableName(mRunnable, aName,
+ EventQueuePriority(GetPriority()));
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ private:
+ RefPtr<nsIRunnable> mRunnable;
+};
+
+void TaskController::DispatchRunnable(already_AddRefed<nsIRunnable>&& aRunnable,
+ uint32_t aPriority,
+ TaskManager* aManager) {
+ RefPtr<RunnableTask> task = new RunnableTask(std::move(aRunnable), aPriority);
+
+ task->SetManager(aManager);
+ TaskController::Get()->AddTask(task.forget());
+}
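+
+// An illustrative call site (a sketch, not code from this file): dispatching
+// an ordinary runnable through the task graph at normal priority.
+//
+//   TaskController::Get()->DispatchRunnable(
+//       NS_NewRunnableFunction("Example", [] { /* work */ }),
+//       static_cast<uint32_t>(EventQueuePriority::Normal));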
+
+nsIRunnable* TaskController::GetRunnableForMTTask(bool aReallyWait) {
+ MutexAutoLock lock(mGraphMutex);
+
+ while (mMainThreadTasks.empty()) {
+ if (!aReallyWait) {
+ return nullptr;
+ }
+
+ AUTO_PROFILER_LABEL("TaskController::GetRunnableForMTTask::Wait", IDLE);
+ mMainThreadCV.Wait();
+ }
+
+ return aReallyWait ? mMTBlockingProcessingRunnable : mMTProcessingRunnable;
+}
+
+bool TaskController::HasMainThreadPendingTasks() {
+ MOZ_ASSERT(NS_IsMainThread());
+ auto resetIdleState = MakeScopeExit([&idleManager = mIdleTaskManager] {
+ if (idleManager) {
+ idleManager->State().ClearCachedIdleDeadline();
+ }
+ });
+
+ for (bool considerIdle : {false, true}) {
+ if (considerIdle && !mIdleTaskManager) {
+ continue;
+ }
+
+ MutexAutoLock lock(mGraphMutex);
+
+ if (considerIdle) {
+ mIdleTaskManager->State().ForgetPendingTaskGuarantee();
+ // Temporarily unlock so we can peek our idle deadline.
+ // XXX We could do this _before_ we take the lock if the API would let us.
+ // We do want to do this before looking at mMainThreadTasks, in case
+ // someone adds one while we're unlocked.
+ {
+ MutexAutoUnlock unlock(mGraphMutex);
+ mIdleTaskManager->State().CachePeekedIdleDeadline(unlock);
+ }
+ }
+
+ // Return early if there's no tasks at all.
+ if (mMainThreadTasks.empty()) {
+ return false;
+ }
+
+ // We can cheaply count how many tasks are suspended.
+ uint64_t totalSuspended = 0;
+ for (TaskManager* manager : mTaskManagers) {
+ DebugOnly<bool> modifierChanged =
+ manager
+ ->UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
+ lock, TaskManager::IterationType::NOT_EVENT_LOOP_TURN);
+ MOZ_ASSERT(!modifierChanged);
+
+ // The idle manager should be suspended unless we're doing the idle pass.
+ MOZ_ASSERT(manager != mIdleTaskManager || manager->mCurrentSuspended ||
+ considerIdle,
+ "Why are idle tasks not suspended here?");
+
+ if (manager->mCurrentSuspended) {
+ // XXX - If managers manage off-main-thread tasks this breaks! This
+ // scenario is explicitly not supported.
+ //
+ // This is only incremented inside the lock -or- decremented on the main
+ // thread so this is safe.
+ totalSuspended += manager->mTaskCount;
+ }
+ }
+
+ // This would break down if we have a non-suspended task depending on a
+ // suspended task. This is why for the moment we do not allow tasks
+ // to be dependent on tasks managed by another taskmanager.
+ if (mMainThreadTasks.size() > totalSuspended) {
+ // If mIdleTaskManager->mTaskCount is 0, we never updated the suspended
+ // state of mIdleTaskManager above, hence shouldn't even check it here.
+ // But in that case idle tasks are not contributing to our suspended task
+ // count anyway.
+ if (mIdleTaskManager && mIdleTaskManager->mTaskCount &&
+ !mIdleTaskManager->mCurrentSuspended) {
+ MOZ_ASSERT(considerIdle, "Why is mIdleTaskManager not suspended?");
+ // Check whether the idle tasks were really needed to make our "we have
+ // an unsuspended task" decision. If they were, we need to force-enable
+ // idle tasks until we run our next task.
+ if (mMainThreadTasks.size() - mIdleTaskManager->mTaskCount <=
+ totalSuspended) {
+ mIdleTaskManager->State().EnforcePendingTaskGuarantee();
+ }
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+uint64_t TaskController::PendingMainthreadTaskCountIncludingSuspended() {
+ MutexAutoLock lock(mGraphMutex);
+ return mMainThreadTasks.size();
+}
+
+bool TaskController::ExecuteNextTaskOnlyMainThreadInternal(
+ const MutexAutoLock& aProofOfLock) {
+ MOZ_ASSERT(NS_IsMainThread());
+ mGraphMutex.AssertCurrentThreadOwns();
+ // Block to make it easier to jump to our cleanup.
+ bool taskRan = false;
+ do {
+ taskRan = DoExecuteNextTaskOnlyMainThreadInternal(aProofOfLock);
+ if (taskRan) {
+ if (mIdleTaskManager && mIdleTaskManager->mTaskCount &&
+ mIdleTaskManager->IsSuspended(aProofOfLock)) {
+ uint32_t activeTasks = mMainThreadTasks.size();
+ for (TaskManager* manager : mTaskManagers) {
+ if (manager->IsSuspended(aProofOfLock)) {
+ activeTasks -= manager->mTaskCount;
+ } else {
+ break;
+ }
+ }
+
+ if (!activeTasks) {
+ // We have only idle (and maybe other suspended) tasks left, so we need
+ // to update the idle state. We need to temporarily release the lock
+ // while we do that.
+ MutexAutoUnlock unlock(mGraphMutex);
+ mIdleTaskManager->State().RequestIdleDeadlineIfNeeded(unlock);
+ }
+ }
+ break;
+ }
+
+ if (!mIdleTaskManager) {
+ break;
+ }
+
+ if (mIdleTaskManager->mTaskCount) {
+ // We have idle tasks that we may not have gotten above because
+ // our idle state is not up to date. We need to update the idle state
+ // and try again. We need to temporarily release the lock while we do
+ // that.
+ MutexAutoUnlock unlock(mGraphMutex);
+ mIdleTaskManager->State().UpdateCachedIdleDeadline(unlock);
+ } else {
+ MutexAutoUnlock unlock(mGraphMutex);
+ mIdleTaskManager->State().RanOutOfTasks(unlock);
+ }
+
+ // When we unlocked, someone may have queued a new task on us. So try to
+ // see whether we can run things again.
+ taskRan = DoExecuteNextTaskOnlyMainThreadInternal(aProofOfLock);
+ } while (false);
+
+ if (mIdleTaskManager) {
+ // The pending task guarantee is not needed anymore, since we just tried
+ // running a task.
+ mIdleTaskManager->State().ForgetPendingTaskGuarantee();
+
+ if (mMainThreadTasks.empty()) {
+ ++mRunOutOfMTTasksCounter;
+
+ // XXX the IdlePeriodState API demands we have a MutexAutoUnlock for it.
+ // Otherwise we could perhaps just do this after we exit the locked block,
+ // by pushing the lock down into this method. Though it's not clear that
+ // we could check mMainThreadTasks.size() once we unlock, and whether we
+ // could maybe substitute mMayHaveMainThreadTask for that check.
+ MutexAutoUnlock unlock(mGraphMutex);
+ mIdleTaskManager->State().RanOutOfTasks(unlock);
+ }
+ }
+
+ return taskRan;
+}
+
+bool TaskController::DoExecuteNextTaskOnlyMainThreadInternal(
+ const MutexAutoLock& aProofOfLock) {
+ mGraphMutex.AssertCurrentThreadOwns();
+
+ nsCOMPtr<nsIThread> mainIThread;
+ NS_GetMainThread(getter_AddRefs(mainIThread));
+
+ nsThread* mainThread = static_cast<nsThread*>(mainIThread.get());
+ if (mainThread) {
+ mainThread->SetRunningEventDelay(TimeDuration(), TimeStamp());
+ }
+
+ uint32_t totalSuspended = 0;
+ for (TaskManager* manager : mTaskManagers) {
+ bool modifierChanged =
+ manager
+ ->UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
+ aProofOfLock, TaskManager::IterationType::EVENT_LOOP_TURN);
+ if (modifierChanged) {
+ ProcessUpdatedPriorityModifier(manager);
+ }
+ if (manager->mCurrentSuspended) {
+ totalSuspended += manager->mTaskCount;
+ }
+ }
+
+ MOZ_ASSERT(mMainThreadTasks.size() >= totalSuspended);
+
+ // This would break down if we have a non-suspended task depending on a
+ // suspended task. This is why for the moment we do not allow tasks
+ // to be dependent on tasks managed by another taskmanager.
+ if (mMainThreadTasks.size() > totalSuspended) {
+ for (auto iter = mMainThreadTasks.begin(); iter != mMainThreadTasks.end();
+ iter++) {
+ Task* task = iter->get();
+
+ if (task->mTaskManager && task->mTaskManager->mCurrentSuspended) {
+ // Even though we may want to run some dependencies of this task, we
+ // will run them at their own priority level and not the priority
+ // level of their dependents.
+ continue;
+ }
+
+ task = GetFinalDependency(task);
+
+ if (!task->IsMainThreadOnly() || task->mInProgress ||
+ (task->mTaskManager && task->mTaskManager->mCurrentSuspended)) {
+ continue;
+ }
+
+ mCurrentTasksMT.push(task);
+ mMainThreadTasks.erase(task->mIterator);
+ task->mIterator = mMainThreadTasks.end();
+ task->mInProgress = true;
+ TaskManager* manager = task->GetManager();
+ bool result = false;
+
+ {
+ MutexAutoUnlock unlock(mGraphMutex);
+ if (manager) {
+ manager->WillRunTask();
+ if (manager != mIdleTaskManager) {
+ // Notify the idle period state that we're running a non-idle task.
+ // This needs to happen while our mutex is not locked!
+ mIdleTaskManager->State().FlagNotIdle();
+ } else {
+ TimeStamp idleDeadline =
+ mIdleTaskManager->State().GetCachedIdleDeadline();
+ MOZ_ASSERT(
+ idleDeadline,
+ "How can we not have a deadline if our manager is enabled?");
+ task->SetIdleDeadline(idleDeadline);
+ }
+ }
+ if (mIdleTaskManager) {
+ // We found a task to run; we can clear the idle deadline on our idle
+ // task manager. This _must_ be done before we actually run the task,
+ // because running the task could reenter via spinning the event loop
+ // and we want to make sure there's no cached idle deadline at that
+ // point. But we have to make sure we do it after our SetIdleDeadline
+ // call above, in the case when the task is actually an idle task.
+ mIdleTaskManager->State().ClearCachedIdleDeadline();
+ }
+
+ TimeStamp now = TimeStamp::Now();
+
+ if (mainThread) {
+ if (task->GetPriority() < uint32_t(EventQueuePriority::InputHigh) ||
+ task->mInsertionTime.IsNull()) {
+ mainThread->SetRunningEventDelay(TimeDuration(), now);
+ } else {
+ mainThread->SetRunningEventDelay(now - task->mInsertionTime, now);
+ }
+ }
+
+ nsAutoCString name;
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ task->GetName(name);
+#endif
+
+ PerformanceCounterState::Snapshot snapshot =
+ mPerformanceCounterState->RunnableWillRun(
+ task->GetPerformanceCounter(), now,
+ manager == mIdleTaskManager);
+
+ {
+ LogTask::Run log(task);
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ AutoSetMainThreadRunnableName nameGuard(name);
+#endif
+ AUTO_PROFILE_FOLLOWING_TASK(task);
+ result = task->Run();
+ }
+
+ // Task itself should keep manager alive.
+ if (manager) {
+ manager->DidRunTask();
+ }
+
+ mPerformanceCounterState->RunnableDidRun(name, std::move(snapshot));
+ }
+
+ // Task itself should keep manager alive.
+ if (manager && result && manager->mTaskCount == 0) {
+ mTaskManagers.erase(manager);
+ }
+
+ task->mInProgress = false;
+
+ if (!result) {
+ // Presumably this task was interrupted; leave its dependencies
+ // unresolved and reinsert it into the queue.
+ auto insertion =
+ mMainThreadTasks.insert(std::move(mCurrentTasksMT.top()));
+ MOZ_ASSERT(insertion.second);
+ task->mIterator = insertion.first;
+ manager->WillRunTask();
+ } else {
+ task->mCompleted = true;
+#ifdef DEBUG
+ task->mIsInGraph = false;
+#endif
+ // Clear dependencies to release references.
+ task->mDependencies.clear();
+
+ if (!mThreadableTasks.empty()) {
+ // We're going to wake up a single thread in our pool. This thread
+ // is responsible for waking up additional threads in the situation
+ // where more than one task became available.
+ mThreadPoolCV.Notify();
+ }
+ }
+
+ mCurrentTasksMT.pop();
+ return true;
+ }
+ }
+
+ mMayHaveMainThreadTask = false;
+ if (mIdleTaskManager) {
+ // We did not find a task to run. We still need to clear the cached idle
+ // deadline on our idle state, because that deadline was only relevant to
+ // the execution of this function. Had we found a task, we would have
+ // cleared the deadline before running that task.
+ mIdleTaskManager->State().ClearCachedIdleDeadline();
+ }
+ return false;
+}
+
+Task* TaskController::GetFinalDependency(Task* aTask) {
+ Task* nextTask;
+
+ while ((nextTask = aTask->GetHighestPriorityDependency())) {
+ aTask = nextTask;
+ }
+
+ return aTask;
+}
+
+void TaskController::MaybeInterruptTask(Task* aTask) {
+ mGraphMutex.AssertCurrentThreadOwns();
+
+ if (!aTask) {
+ return;
+ }
+
+ // This optimization prevents many slow lookups in long chains of similar
+ // priority.
+ if (!aTask->mDependencies.empty()) {
+ Task* firstDependency = aTask->mDependencies.begin()->get();
+ if (aTask->GetPriority() <= firstDependency->GetPriority() &&
+ !firstDependency->mCompleted &&
+ aTask->IsMainThreadOnly() == firstDependency->IsMainThreadOnly()) {
+ // This task's first dependency already has at least this task's priority,
+ // so there is never any need to interrupt.
+ return;
+ }
+ }
+
+ Task* finalDependency = GetFinalDependency(aTask);
+
+ if (finalDependency->mInProgress) {
+ // No need to wake anything, we can't schedule this task right now anyway.
+ return;
+ }
+
+ if (aTask->IsMainThreadOnly()) {
+ mMayHaveMainThreadTask = true;
+
+ EnsureMainThreadTasksScheduled();
+
+ if (mCurrentTasksMT.empty()) {
+ return;
+ }
+
+ // We could go through the steps above here and interrupt an off main
+ // thread task in case it has a lower priority.
+ if (!finalDependency->IsMainThreadOnly()) {
+ return;
+ }
+
+ if (mCurrentTasksMT.top()->GetPriority() < aTask->GetPriority()) {
+ mCurrentTasksMT.top()->RequestInterrupt(aTask->GetPriority());
+ }
+ } else {
+ Task* lowestPriorityTask = nullptr;
+ for (PoolThread& thread : mPoolThreads) {
+ if (!thread.mCurrentTask) {
+ mThreadPoolCV.Notify();
+ // There's a free thread, no need to interrupt anything.
+ return;
+ }
+
+ if (!lowestPriorityTask) {
+ lowestPriorityTask = thread.mCurrentTask.get();
+ continue;
+ }
+
+ // This should possibly select the lowest priority task which was started
+ // the latest. But for now we ignore that optimization.
+ // This also doesn't guarantee a task is interruptable, so that's an
+ // avenue for improvements as well.
+ if (lowestPriorityTask->GetPriority() > thread.mEffectiveTaskPriority) {
+ lowestPriorityTask = thread.mCurrentTask.get();
+ }
+ }
+
+ if (lowestPriorityTask->GetPriority() < aTask->GetPriority()) {
+ lowestPriorityTask->RequestInterrupt(aTask->GetPriority());
+ }
+
+ // We choose not to interrupt main thread tasks for tasks which may be
+ // executed off the main thread.
+ }
+}
+
+Task* TaskController::GetHighestPriorityMTTask() {
+ mGraphMutex.AssertCurrentThreadOwns();
+
+ if (!mMainThreadTasks.empty()) {
+ return mMainThreadTasks.begin()->get();
+ }
+ return nullptr;
+}
+
+void TaskController::EnsureMainThreadTasksScheduled() {
+ if (mObserver) {
+ mObserver->OnDispatchedEvent();
+ }
+ if (mExternalCondVar) {
+ mExternalCondVar->Notify();
+ }
+ mMainThreadCV.Notify();
+}
+
+void TaskController::ProcessUpdatedPriorityModifier(TaskManager* aManager) {
+ mGraphMutex.AssertCurrentThreadOwns();
+
+ MOZ_ASSERT(NS_IsMainThread());
+
+ int32_t modifier = aManager->mCurrentPriorityModifier;
+
+ std::vector<RefPtr<Task>> storedTasks;
+ // Find all relevant tasks.
+ for (auto iter = mMainThreadTasks.begin(); iter != mMainThreadTasks.end();) {
+ if ((*iter)->mTaskManager == aManager) {
+ storedTasks.push_back(*iter);
+ iter = mMainThreadTasks.erase(iter);
+ } else {
+ iter++;
+ }
+ }
+
+ // Reinsert found tasks with their new priorities.
+ for (RefPtr<Task>& ref : storedTasks) {
+ // Kept alive at first by the vector and then by mMainThreadTasks.
+ Task* task = ref;
+ task->mPriorityModifier = modifier;
+ auto insertion = mMainThreadTasks.insert(std::move(ref));
+ MOZ_ASSERT(insertion.second);
+ task->mIterator = insertion.first;
+ }
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/TaskController.h b/xpcom/threads/TaskController.h
new file mode 100644
index 0000000000..184080002a
--- /dev/null
+++ b/xpcom/threads/TaskController.h
@@ -0,0 +1,445 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_TaskController_h
+#define mozilla_TaskController_h
+
+#include "MainThreadUtils.h"
+#include "mozilla/CondVar.h"
+#include "mozilla/IdlePeriodState.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/EventQueue.h"
+#include "nsISupportsImpl.h"
+#include "nsIEventTarget.h"
+
+#include <atomic>
+#include <memory>
+#include <vector>
+#include <set>
+#include <list>
+#include <stack>
+
+class nsIRunnable;
+class nsIThreadObserver;
+
+namespace mozilla {
+
+class Task;
+class TaskController;
+class PerformanceCounter;
+class PerformanceCounterState;
+
+const EventQueuePriority kDefaultPriorityValue = EventQueuePriority::Normal;
+
+// This file contains the core classes to access the Gecko scheduler. The
+// scheduler forms a graph of prioritized tasks, and is responsible for ensuring
+// the execution of tasks or their dependencies in order of inherited priority.
+//
+// The core class is the 'Task' class. The task class describes a single unit of
+// work. Users scheduling work implement this class and are required to
+// reimplement the 'Run' function in order to do work.
+//
+// The TaskManager class is reimplemented by users that require
+// the ability to reprioritize or suspend tasks.
+//
+// The TaskController is responsible for scheduling the work itself. The AddTask
+// function is used to schedule work. The ReprioritizeTask function may be used
+// to change the priority of a task already in the task graph, without
+// unscheduling it.
+
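+// A minimal usage sketch (illustrative only; MyWork is a hypothetical
+// subclass, not part of this file):
+//
+//   class MyWork final : public Task {
+//    public:
+//     MyWork() : Task(true /* main thread only */, EventQueuePriority::Normal) {}
+//
+//     bool Run() override {
+//       // ... perform one unit of work ...
+//       return true;  // returning false would reschedule the task
+//     }
+//
+// #ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+//     bool GetName(nsACString& aName) override {
+//       aName.AssignLiteral("MyWork");
+//       return true;
+//     }
+// #endif
+//   };
+//
+//   TaskController::Get()->AddTask(MakeAndAddRef<MyWork>());
+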
+// The TaskManager is the base class used to atomically manage a large set of
+// tasks. API users reimplementing TaskManager may reimplement a number of
+// functions that they may use to indicate to the scheduler changes in the state
+// for any tasks they manage. They may be used to reprioritize or suspend tasks
+// under their control, and will also be notified before and after tasks under
+// their control are executed. Their methods will only be called once per event
+// loop turn; however, they may still incur some performance overhead. In
+// addition to this, frequent reprioritizations may incur a significant
+// performance overhead and are discouraged. A TaskManager may currently only be
+// used to manage tasks that are bound to the Gecko Main Thread.
+class TaskManager {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TaskManager)
+
+ TaskManager() : mTaskCount(0) {}
+
+ // Subclasses implementing task manager will have this function called to
+ // determine whether their associated tasks are currently suspended. This
+ // will only be called once per iteration of the task queue, this means that
+ // suspension of tasks managed by a single TaskManager may be assumed to
+ // occur atomically.
+ virtual bool IsSuspended(const MutexAutoLock& aProofOfLock) { return false; }
+
+ // Subclasses may implement this in order to supply a priority adjustment
+ // to their managed tasks. This is called once per iteration of the task
+ // queue, and may be assumed to occur atomically for all managed tasks.
+ virtual int32_t GetPriorityModifierForEventLoopTurn(
+ const MutexAutoLock& aProofOfLock) {
+ return 0;
+ }
+
+ void DidQueueTask() { ++mTaskCount; }
+ // This is called when a managed task is about to be executed by the
+ // scheduler. Anyone reimplementing this should ensure to call the parent or
+ // decrement mTaskCount.
+ virtual void WillRunTask() { --mTaskCount; }
+ // This is called when a managed task has finished being executed by the
+ // scheduler.
+ virtual void DidRunTask() {}
+ uint32_t PendingTaskCount() { return mTaskCount; }
+
+ protected:
+ virtual ~TaskManager() {}
+
+ private:
+ friend class TaskController;
+
+ enum class IterationType { NOT_EVENT_LOOP_TURN, EVENT_LOOP_TURN };
+ bool UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
+ const MutexAutoLock& aProofOfLock, IterationType aIterationType);
+
+ bool mCurrentSuspended = false;
+ int32_t mCurrentPriorityModifier = 0;
+
+ std::atomic<uint32_t> mTaskCount;
+};
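+
+// A hedged sketch of a reimplemented manager (illustrative only; the class
+// name and the atomic flag are assumptions, not part of this file). It
+// suspends all of its managed tasks while the flag is set:
+//
+//   class SuspendableTaskManager final : public TaskManager {
+//    public:
+//     bool IsSuspended(const MutexAutoLock& aProofOfLock) override {
+//       return mSuspended;
+//     }
+//     void SetSuspended(bool aSuspended) { mSuspended = aSuspended; }
+//
+//    private:
+//     std::atomic<bool> mSuspended{false};
+//   };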
+
+// A Task is the base class for any unit of work that may be scheduled.
+// Subclasses may specify their priority and whether they should be bound to
+// the Gecko Main thread. When not bound to the main thread tasks may be
+// executed on any available thread (including the main thread), but they may
+// also be executed in parallel to any other task they do not have a dependency
+// relationship with. Tasks will be run in order of object creation.
+class Task {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Task)
+
+ bool IsMainThreadOnly() { return mMainThreadOnly; }
+
+ // This returns the current task priority with its modifier applied.
+ uint32_t GetPriority() { return mPriority + mPriorityModifier; }
+ uint64_t GetSeqNo() { return mSeqNo; }
+
+ // Callee needs to assume this may be called on any thread.
+ // aInterruptPriority passes the priority of the higher priority task that
+ // is ready to be executed. The task may safely ignore this function, or
+ // interrupt any work being done. It may return 'false' from its run function
+ // in order to be run automatically in the future, or true if it will
+ // reschedule incomplete work manually.
+ virtual void RequestInterrupt(uint32_t aInterruptPriority) {}
+
+ // At the moment this -must- be called before the task is added to the
+ // controller. Calling this after tasks have been added to the controller
+ // results in undefined behavior!
+ // At submission, tasks must depend only on tasks managed by the same idle
+ // manager, or by none at all.
+ void AddDependency(Task* aTask) {
+ MOZ_ASSERT(aTask);
+ MOZ_ASSERT(!mIsInGraph);
+ mDependencies.insert(aTask);
+ }
+
+ // This sets the TaskManager for the current task. Calling this after the
+ // task has been added to the TaskController results in undefined behavior.
+ void SetManager(TaskManager* aManager) {
+ MOZ_ASSERT(mMainThreadOnly);
+ MOZ_ASSERT(!mIsInGraph);
+ mTaskManager = aManager;
+ }
+ TaskManager* GetManager() { return mTaskManager; }
+
+ struct PriorityCompare {
+ bool operator()(const RefPtr<Task>& aTaskA,
+ const RefPtr<Task>& aTaskB) const {
+ uint32_t prioA = aTaskA->GetPriority();
+ uint32_t prioB = aTaskB->GetPriority();
+ return (prioA > prioB) ||
+ (prioA == prioB && (aTaskA->GetSeqNo() < aTaskB->GetSeqNo()));
+ }
+ };
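+
+ // For example (illustrative): with this comparator a priority-3 task
+ // orders before a priority-1 task, and two equal-priority tasks order by
+ // ascending sequence number, i.e. by creation order.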
+
+ // Tell the task about its idle deadline. Will only be called for
+ // tasks managed by an IdleTaskManager, right before the task runs.
+ virtual void SetIdleDeadline(TimeStamp aDeadline) {}
+
+ virtual PerformanceCounter* GetPerformanceCounter() const { return nullptr; }
+
+ // Get a name for this task. This returns false if the task has no name.
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ virtual bool GetName(nsACString& aName) = 0;
+#else
+ virtual bool GetName(nsACString& aName) { return false; }
+#endif
+
+ protected:
+ Task(bool aMainThreadOnly,
+ uint32_t aPriority = static_cast<uint32_t>(kDefaultPriorityValue))
+ : mMainThreadOnly(aMainThreadOnly),
+ mSeqNo(sCurrentTaskSeqNo++),
+ mPriority(aPriority) {}
+
+ Task(bool aMainThreadOnly,
+ EventQueuePriority aPriority = kDefaultPriorityValue)
+ : mMainThreadOnly(aMainThreadOnly),
+ mSeqNo(sCurrentTaskSeqNo++),
+ mPriority(static_cast<uint32_t>(aPriority)) {}
+
+ virtual ~Task() {}
+
+ friend class TaskController;
+
+ // When this returns false, the task is considered incomplete and will be
+ // rescheduled at the current 'mPriority' level.
+ virtual bool Run() = 0;
+
+ private:
+ Task* GetHighestPriorityDependency();
+
+ // Iterator pointing to this task's position in
+ // mThreadableTasks/mMainThreadTasks if, and only if this task is currently
+ // scheduled to be executed. This allows fast access to the task's position
+ // in the set, allowing for fast removal.
+ // This is safe, and remains valid unless the task is removed from the set.
+ // See also iterator invalidation in:
+ // https://en.cppreference.com/w/cpp/container
+ //
+ // Or the spec:
+ // "All Associative Containers: The insert and emplace members shall not
+ // affect the validity of iterators and references to the container
+ // [26.2.6/9]" "All Associative Containers: The erase members shall invalidate
+ // only iterators and references to the erased elements [26.2.6/9]"
+ std::set<RefPtr<Task>, PriorityCompare>::iterator mIterator;
+ std::set<RefPtr<Task>, PriorityCompare> mDependencies;
+
+ RefPtr<TaskManager> mTaskManager;
+
+ // Access to these variables is protected by the GraphMutex.
+ bool mMainThreadOnly;
+ bool mCompleted = false;
+ bool mInProgress = false;
+#ifdef DEBUG
+ bool mIsInGraph = false;
+#endif
+
+ static std::atomic<uint64_t> sCurrentTaskSeqNo;
+ uint64_t mSeqNo;
+ uint32_t mPriority;
+ // Modifier currently being applied to this task by its taskmanager.
+ int32_t mPriorityModifier = 0;
+ // Time this task was inserted into the task graph, this is used by the
+ // profiler.
+ mozilla::TimeStamp mInsertionTime;
+};
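+
+// A dependency sketch (illustrative; MyWork is the hypothetical subclass from
+// the sketch near the top of this file). Dependencies must be added before
+// the tasks are handed to the controller, per AddDependency() above:
+//
+//   RefPtr<Task> parse = MakeRefPtr<MyWork>();
+//   RefPtr<Task> render = MakeRefPtr<MyWork>();
+//   render->AddDependency(parse);  // render may only run once parse completes
+//   TaskController::Get()->AddTask(parse.forget());
+//   TaskController::Get()->AddTask(render.forget());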
+
+struct PoolThread {
+ PRThread* mThread;
+ RefPtr<Task> mCurrentTask;
+ // This may be higher than mCurrentTask's priority due to priority
+ // propagation. This is -only- valid when mCurrentTask != nullptr.
+ uint32_t mEffectiveTaskPriority;
+};
+
+// A task manager implementation for priority levels that should only
+// run during idle periods.
+class IdleTaskManager : public TaskManager {
+ public:
+ explicit IdleTaskManager(already_AddRefed<nsIIdlePeriod>&& aIdlePeriod)
+ : mIdlePeriodState(std::move(aIdlePeriod)), mProcessedTaskCount(0) {}
+
+ IdlePeriodState& State() { return mIdlePeriodState; }
+
+ bool IsSuspended(const MutexAutoLock& aProofOfLock) override {
+ TimeStamp idleDeadline = State().GetCachedIdleDeadline();
+ return !idleDeadline;
+ }
+
+ void DidRunTask() override {
+ TaskManager::DidRunTask();
+ ++mProcessedTaskCount;
+ }
+
+ uint64_t ProcessedTaskCount() { return mProcessedTaskCount; }
+
+ private:
+ // Tracking of our idle state of various sorts.
+ IdlePeriodState mIdlePeriodState;
+
+ std::atomic<uint64_t> mProcessedTaskCount;
+};
+
+// The TaskController is the core class of the scheduler. It is used to
+// schedule tasks to be executed, as well as to reprioritize tasks that have
+// already been scheduled. The core functions to do this are AddTask and
+// ReprioritizeTask.
+class TaskController {
+ public:
+ TaskController();
+
+ static TaskController* Get();
+
+ static void Initialize();
+
+ void SetThreadObserver(nsIThreadObserver* aObserver) {
+ MutexAutoLock lock(mGraphMutex);
+ mObserver = aObserver;
+ }
+ void SetConditionVariable(CondVar* aExternalCondVar) {
+ MutexAutoLock lock(mGraphMutex);
+ mExternalCondVar = aExternalCondVar;
+ }
+
+ void SetIdleTaskManager(IdleTaskManager* aIdleTaskManager) {
+ mIdleTaskManager = aIdleTaskManager;
+ }
+ IdleTaskManager* GetIdleTaskManager() { return mIdleTaskManager.get(); }
+
+ uint64_t RunOutOfMTTasksCount() { return mRunOutOfMTTasksCounter; }
+
+ // Initialization and shutdown code.
+ void SetPerformanceCounterState(
+ PerformanceCounterState* aPerformanceCounterState);
+
+ static void Shutdown();
+
+ // This adds a task to the TaskController graph.
+ // This may be called on any thread.
+ void AddTask(already_AddRefed<Task>&& aTask);
+
+ // This wait function is the theoretical function you would need if our main
+ // thread needs to also process OS messages or something along those lines.
+ void WaitForTaskOrMessage();
+
+ // This gets the next (highest priority) task that is only allowed to execute
+ // on the main thread.
+ void ExecuteNextTaskOnlyMainThread();
+
+ // Process all pending main thread tasks.
+ void ProcessPendingMTTask(bool aMayWait = false);
+
+ // This allows reprioritization of a task already in the task graph.
+ // This may be called on any thread.
+ void ReprioritizeTask(Task* aTask, uint32_t aPriority);
+
+ void DispatchRunnable(already_AddRefed<nsIRunnable>&& aRunnable,
+ uint32_t aPriority, TaskManager* aManager = nullptr);
+
+ nsIRunnable* GetRunnableForMTTask(bool aReallyWait);
+
+ bool HasMainThreadPendingTasks();
+
+ uint64_t PendingMainthreadTaskCountIncludingSuspended();
+
+ // Let users know whether the last main thread task runnable did work.
+ bool MTTaskRunnableProcessedTask() {
+ MOZ_ASSERT(NS_IsMainThread());
+ return mMTTaskRunnableProcessedTask;
+ }
+
+ static int32_t GetPoolThreadCount();
+ static size_t GetThreadStackSize();
+
+ private:
+ friend void ThreadFuncPoolThread(void* aIndex);
+
+ void InitializeThreadPool();
+
+ // This gets the next (highest priority) task that is only allowed to execute
+ // on the main thread, if any, and executes it.
+ // Returns true if it succeeded.
+ bool ExecuteNextTaskOnlyMainThreadInternal(const MutexAutoLock& aProofOfLock);
+
+ // The guts of ExecuteNextTaskOnlyMainThreadInternal, which get idle handling
+ // wrapped around them. Returns whether a task actually ran.
+ bool DoExecuteNextTaskOnlyMainThreadInternal(
+ const MutexAutoLock& aProofOfLock);
+
+ Task* GetFinalDependency(Task* aTask);
+ void MaybeInterruptTask(Task* aTask);
+ Task* GetHighestPriorityMTTask();
+
+ void EnsureMainThreadTasksScheduled();
+
+ void ProcessUpdatedPriorityModifier(TaskManager* aManager);
+
+ void ShutdownThreadPoolInternal();
+ void ShutdownInternal();
+
+ void RunPoolThread();
+
+ static std::unique_ptr<TaskController> sSingleton;
+ static StaticMutex sSingletonMutex MOZ_UNANNOTATED;
+
+ // This protects access to the task graph.
+ Mutex mGraphMutex MOZ_UNANNOTATED;
+
+ // This protects thread pool initialization. We cannot do this from within
+ // the GraphMutex, since thread creation on Windows can generate events on
+ // the main thread that need to be handled.
+ Mutex mPoolInitializationMutex =
+ Mutex("TaskController::mPoolInitializationMutex");
+ // Created under the PoolInitialization mutex, then never extended, and
+ // only freed when the object is freed. mThread is set at creation time;
+ // mCurrentTask and mEffectiveTaskPriority are only accessed from the
+ // thread, so no locking is needed to access this.
+ std::vector<PoolThread> mPoolThreads;
+
+ CondVar mThreadPoolCV;
+ CondVar mMainThreadCV;
+
+ // Variables below are protected by mGraphMutex.
+
+ std::stack<RefPtr<Task>> mCurrentTasksMT;
+
+ // A list of all tasks ordered by priority.
+ std::set<RefPtr<Task>, Task::PriorityCompare> mThreadableTasks;
+ std::set<RefPtr<Task>, Task::PriorityCompare> mMainThreadTasks;
+
+ // TaskManagers currently active.
+ // We can use a raw pointer since tasks always hold on to their TaskManager.
+ std::set<TaskManager*> mTaskManagers;
+
+ // This ensures we keep running the main thread if we processed a task there.
+ bool mMayHaveMainThreadTask = true;
+ bool mShuttingDown = false;
+
+ // This stores whether the last main thread task runnable did work.
+ // Accessed only on MainThread
+ bool mMTTaskRunnableProcessedTask = false;
+
+ // Whether our thread pool is initialized. We use this currently to avoid
+ // starting the threads in processes where it's never used. This is protected
+ // by mPoolInitializationMutex.
+ bool mThreadPoolInitialized = false;
+
+ // Whether we have scheduled a runnable on the main thread event loop.
+ // This is used for nsIRunnable compatibility.
+ RefPtr<nsIRunnable> mMTProcessingRunnable;
+ RefPtr<nsIRunnable> mMTBlockingProcessingRunnable;
+
+ // XXX - Thread observer to notify when a new event has been dispatched
+ // Set immediately, then simply accessed from any thread
+ nsIThreadObserver* mObserver = nullptr;
+ // XXX - External condvar to notify when we have received an event
+ CondVar* mExternalCondVar = nullptr;
+ // Idle task manager so we can properly do idle state stuff.
+ RefPtr<IdleTaskManager> mIdleTaskManager;
+
+ // How many times the main thread was empty.
+ std::atomic<uint64_t> mRunOutOfMTTasksCounter;
+
+ // Our tracking of our performance counter and long task state,
+ // shared with nsThread.
+ // Set once when MainThread is created, never changed, only accessed from
+ // DoExecuteNextTaskOnlyMainThreadInternal()
+ PerformanceCounterState* mPerformanceCounterState = nullptr;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_TaskController_h
diff --git a/xpcom/threads/TaskDispatcher.h b/xpcom/threads/TaskDispatcher.h
new file mode 100644
index 0000000000..1f27c32c7d
--- /dev/null
+++ b/xpcom/threads/TaskDispatcher.h
@@ -0,0 +1,304 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(TaskDispatcher_h_)
+# define TaskDispatcher_h_
+
+# include <queue>
+
+# include "mozilla/AbstractThread.h"
+# include "mozilla/Maybe.h"
+# include "mozilla/ProfilerRunnable.h"
+# include "mozilla/UniquePtr.h"
+# include "nsIDirectTaskDispatcher.h"
+# include "nsISupportsImpl.h"
+# include "nsTArray.h"
+# include "nsThreadUtils.h"
+
+namespace mozilla {
+
+class SimpleTaskQueue {
+ public:
+ SimpleTaskQueue() = default;
+ virtual ~SimpleTaskQueue() = default;
+
+ void AddTask(already_AddRefed<nsIRunnable> aRunnable) {
+ if (!mTasks) {
+ mTasks.emplace();
+ }
+ mTasks->push(std::move(aRunnable));
+ }
+
+ void DrainTasks() {
+ if (!mTasks) {
+ return;
+ }
+ auto& queue = mTasks.ref();
+ while (!queue.empty()) {
+ nsCOMPtr<nsIRunnable> r = std::move(queue.front());
+ queue.pop();
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(r);
+ r->Run();
+ }
+ }
+
+ bool HaveTasks() const { return mTasks && !mTasks->empty(); }
+
+ private:
+ // We use a Maybe<> because (a) when used for DirectTasks it often doesn't get
+ // anything put into it, and (b) the std::queue implementation in GNU
+ // libstdc++ does two largish heap allocations when creating a new std::queue.
+ Maybe<std::queue<nsCOMPtr<nsIRunnable>>> mTasks;
+};
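+
+// A small usage sketch (illustrative only):
+//
+//   SimpleTaskQueue queue;
+//   queue.AddTask(NS_NewRunnableFunction("Example", [] { /* work */ }));
+//   if (queue.HaveTasks()) {
+//     queue.DrainTasks();  // runs the queued runnables in FIFO order
+//   }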
+
+/*
+ * A classic approach to cross-thread communication is to dispatch asynchronous
+ * runnables to perform updates on other threads. This generally works well, but
+ * there are sometimes reasons why we might want to delay the actual dispatch of
+ * these tasks until a specified moment. At present, this is primarily useful to
+ * ensure that mirrored state gets updated atomically - but there may be other
+ * applications as well.
+ *
+ * TaskDispatcher is a general abstract class that accepts tasks and dispatches
+ * them at some later point. These groups of tasks are per-target-thread, and
+ * contain separate queues for several kinds of tasks (see comments below):
+ * "state change tasks", which run first and are intended to be used to
+ * update the value held by mirrors, and regular tasks, which are other
+ * arbitrary operations that are gated to run after all the state changes
+ * have completed.
+ */
+class TaskDispatcher {
+ public:
+ TaskDispatcher() = default;
+ virtual ~TaskDispatcher() = default;
+
+ // Direct tasks are run directly (rather than dispatched asynchronously) when
+ // the tail dispatcher fires. A direct task may cause other tasks to be added
+ // to the tail dispatcher.
+ virtual void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) = 0;
+
+ // State change tasks are dispatched asynchronously and always run before
+ // regular tasks. They are intended to be used to update the value held by
+ // mirrors
+ // before any other dispatched tasks are run on the target thread.
+ virtual void AddStateChangeTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable) = 0;
+
+ // Regular tasks are dispatched asynchronously, and run after state change
+ // tasks.
+ virtual nsresult AddTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable) = 0;
+
+ virtual nsresult DispatchTasksFor(AbstractThread* aThread) = 0;
+ virtual bool HasTasksFor(AbstractThread* aThread) = 0;
+ virtual void DrainDirectTasks() = 0;
+};
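+
+// An ordering sketch (illustrative; |dispatcher| and |thread| are assumed to
+// be a concrete TaskDispatcher and a target AbstractThread). State change
+// tasks queued for a thread run before regular tasks queued for it, even if
+// the regular task was added first:
+//
+//   dispatcher.AddTask(thread, NS_NewRunnableFunction("Read", [] {}));
+//   dispatcher.AddStateChangeTask(thread,
+//                                 NS_NewRunnableFunction("Update", [] {}));
+//   // On |thread|, "Update" runs before "Read".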
+
+/*
+ * AutoTaskDispatcher is a stack-scoped TaskDispatcher implementation that fires
+ * its queued tasks when it is popped off the stack.
+ */
+class AutoTaskDispatcher : public TaskDispatcher {
+ public:
+ explicit AutoTaskDispatcher(nsIDirectTaskDispatcher* aDirectTaskDispatcher,
+ bool aIsTailDispatcher = false)
+ : mDirectTaskDispatcher(aDirectTaskDispatcher),
+ mIsTailDispatcher(aIsTailDispatcher) {}
+
+ ~AutoTaskDispatcher() {
+ // Given that direct tasks may trigger other code that uses the tail
+ // dispatcher, it's better to avoid processing them in the tail dispatcher's
+ // destructor. So we require TailDispatchers to manually invoke
+ // DrainDirectTasks before the AutoTaskDispatcher gets destroyed. In truth,
+ // this is only necessary in the case where this AutoTaskDispatcher can be
+ // accessed by the direct tasks it dispatches (true for TailDispatchers, but
+ // potentially not true for other hypothetical AutoTaskDispatchers). Feel
+ // free to loosen this restriction to apply only to mIsTailDispatcher if a
+ // use-case requires it.
+ MOZ_ASSERT(!HaveDirectTasks());
+
+ for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
+ DispatchTaskGroup(std::move(mTaskGroups[i]));
+ }
+ }
+
+ bool HaveDirectTasks() {
+ return mDirectTaskDispatcher && mDirectTaskDispatcher->HaveDirectTasks();
+ }
+
+ void DrainDirectTasks() override {
+ if (mDirectTaskDispatcher) {
+ mDirectTaskDispatcher->DrainDirectTasks();
+ }
+ }
+
+ void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) override {
+ MOZ_ASSERT(mDirectTaskDispatcher);
+ mDirectTaskDispatcher->DispatchDirectTask(std::move(aRunnable));
+ }
+
+ void AddStateChangeTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable) override {
+ nsCOMPtr<nsIRunnable> r = aRunnable;
+ MOZ_RELEASE_ASSERT(r);
+ EnsureTaskGroup(aThread).mStateChangeTasks.AppendElement(r.forget());
+ }
+
+ nsresult AddTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable) override {
+ nsCOMPtr<nsIRunnable> r = aRunnable;
+ MOZ_RELEASE_ASSERT(r);
+ // To preserve the event order, we need to append a new group if the last
+ // group is not targeted for |aThread|.
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=1318226&mark=0-3#c0
+ // for the details of the issue.
+ if (mTaskGroups.Length() == 0 ||
+ mTaskGroups.LastElement()->mThread != aThread) {
+ mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread));
+ }
+
+ PerThreadTaskGroup& group = *mTaskGroups.LastElement();
+ group.mRegularTasks.AppendElement(r.forget());
+
+ return NS_OK;
+ }
+
+ bool HasTasksFor(AbstractThread* aThread) override {
+ return !!GetTaskGroup(aThread) ||
+ (aThread == AbstractThread::GetCurrent() && HaveDirectTasks());
+ }
+
+ nsresult DispatchTasksFor(AbstractThread* aThread) override {
+ nsresult rv = NS_OK;
+
+ // Dispatch all groups that match |aThread|.
+ for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
+ if (mTaskGroups[i]->mThread == aThread) {
+ nsresult rv2 = DispatchTaskGroup(std::move(mTaskGroups[i]));
+
+ if (NS_WARN_IF(NS_FAILED(rv2)) && NS_SUCCEEDED(rv)) {
+ // We should try our best to call DispatchTaskGroup() as much as
+ // possible and return an error if any of DispatchTaskGroup() calls
+ // failed.
+ rv = rv2;
+ }
+
+ mTaskGroups.RemoveElementAt(i--);
+ }
+ }
+
+ return rv;
+ }
+
+ private:
+ struct PerThreadTaskGroup {
+ public:
+ explicit PerThreadTaskGroup(AbstractThread* aThread) : mThread(aThread) {
+ MOZ_COUNT_CTOR(PerThreadTaskGroup);
+ }
+
+ MOZ_COUNTED_DTOR(PerThreadTaskGroup)
+
+ RefPtr<AbstractThread> mThread;
+ nsTArray<nsCOMPtr<nsIRunnable>> mStateChangeTasks;
+ nsTArray<nsCOMPtr<nsIRunnable>> mRegularTasks;
+ };
+
+ class TaskGroupRunnable : public Runnable {
+ public:
+ explicit TaskGroupRunnable(UniquePtr<PerThreadTaskGroup>&& aTasks)
+ : Runnable("AutoTaskDispatcher::TaskGroupRunnable"),
+ mTasks(std::move(aTasks)) {}
+
+ NS_IMETHOD Run() override {
+ // State change tasks get run all together before any code is run, so
+ // that all state changes are made in an atomic unit.
+ for (size_t i = 0; i < mTasks->mStateChangeTasks.Length(); ++i) {
+ mTasks->mStateChangeTasks[i]->Run();
+ }
+
+ // Once the state changes have completed, drain any direct tasks
+ // generated by those state changes (i.e. watcher notification tasks).
+ // This needs to be outside the loop because we don't want to run code
+ // that might observe intermediate states.
+ MaybeDrainDirectTasks();
+
+ for (size_t i = 0; i < mTasks->mRegularTasks.Length(); ++i) {
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(mTasks->mRegularTasks[i]);
+ mTasks->mRegularTasks[i]->Run();
+
+ // Scope direct tasks tightly to the task that generated them.
+ MaybeDrainDirectTasks();
+ }
+
+ return NS_OK;
+ }
+
+ private:
+ void MaybeDrainDirectTasks() {
+ AbstractThread* currentThread = AbstractThread::GetCurrent();
+ if (currentThread && currentThread->MightHaveTailTasks()) {
+ currentThread->TailDispatcher().DrainDirectTasks();
+ }
+ }
+
+ UniquePtr<PerThreadTaskGroup> mTasks;
+ };
+
+ PerThreadTaskGroup& EnsureTaskGroup(AbstractThread* aThread) {
+ PerThreadTaskGroup* existing = GetTaskGroup(aThread);
+ if (existing) {
+ return *existing;
+ }
+
+ mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread));
+ return *mTaskGroups.LastElement();
+ }
+
+ PerThreadTaskGroup* GetTaskGroup(AbstractThread* aThread) {
+ for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
+ if (mTaskGroups[i]->mThread == aThread) {
+ return mTaskGroups[i].get();
+ }
+ }
+
+ // Not found.
+ return nullptr;
+ }
+
+ nsresult DispatchTaskGroup(UniquePtr<PerThreadTaskGroup> aGroup) {
+ RefPtr<AbstractThread> thread = aGroup->mThread;
+
+ AbstractThread::DispatchReason reason =
+ mIsTailDispatcher ? AbstractThread::TailDispatch
+ : AbstractThread::NormalDispatch;
+ nsCOMPtr<nsIRunnable> r = new TaskGroupRunnable(std::move(aGroup));
+ return thread->Dispatch(r.forget(), reason);
+ }
+
+ // Task groups, organized by thread.
+ nsTArray<UniquePtr<PerThreadTaskGroup>> mTaskGroups;
+
+ nsCOMPtr<nsIDirectTaskDispatcher> mDirectTaskDispatcher;
+ // True if this TaskDispatcher represents the tail dispatcher for the thread
+ // upon which it runs.
+ const bool mIsTailDispatcher;
+};
+
+// Little utility class to allow declaring AutoTaskDispatcher as a default
+// parameter for methods that take a TaskDispatcher&.
+template <typename T>
+class PassByRef {
+ public:
+ PassByRef() = default;
+ operator T&() { return mVal; }
+
+ private:
+ T mVal;
+};
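+
+// A usage sketch for the pattern described above (illustrative; DoStuff and
+// SomeDispatcher, a hypothetical default-constructible TaskDispatcher
+// subclass, are assumptions):
+//
+//   void DoStuff(TaskDispatcher& aDispatcher = PassByRef<SomeDispatcher>());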
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/TaskQueue.cpp b/xpcom/threads/TaskQueue.cpp
new file mode 100644
index 0000000000..9c2019de95
--- /dev/null
+++ b/xpcom/threads/TaskQueue.cpp
@@ -0,0 +1,347 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "mozilla/DelayedRunnable.h"
+#include "mozilla/ProfilerRunnable.h"
+#include "nsIEventTarget.h"
+#include "nsITargetShutdownTask.h"
+#include "nsThreadUtils.h"
+#include "nsQueryObject.h"
+
+namespace mozilla {
+
+// Handle for a TaskQueue being tracked by a TaskQueueTracker. When created,
+// it is registered with the TaskQueueTracker, and when destroyed it is
+// unregistered. Holds a threadsafe weak reference to the TaskQueue.
+class TaskQueueTrackerEntry final
+ : private LinkedListElement<TaskQueueTrackerEntry> {
+ public:
+ TaskQueueTrackerEntry(TaskQueueTracker* aTracker,
+ const RefPtr<TaskQueue>& aQueue)
+ : mTracker(aTracker), mQueue(aQueue) {
+ MutexAutoLock lock(mTracker->mMutex);
+ mTracker->mEntries.insertFront(this);
+ }
+ ~TaskQueueTrackerEntry() {
+ MutexAutoLock lock(mTracker->mMutex);
+ removeFrom(mTracker->mEntries);
+ }
+
+ TaskQueueTrackerEntry(const TaskQueueTrackerEntry&) = delete;
+ TaskQueueTrackerEntry(TaskQueueTrackerEntry&&) = delete;
+ TaskQueueTrackerEntry& operator=(const TaskQueueTrackerEntry&) = delete;
+ TaskQueueTrackerEntry& operator=(TaskQueueTrackerEntry&&) = delete;
+
+ RefPtr<TaskQueue> GetQueue() const { return RefPtr<TaskQueue>(mQueue); }
+
+ private:
+ friend class LinkedList<TaskQueueTrackerEntry>;
+ friend class LinkedListElement<TaskQueueTrackerEntry>;
+
+ const RefPtr<TaskQueueTracker> mTracker;
+ const ThreadSafeWeakPtr<TaskQueue> mQueue;
+};
+
+RefPtr<TaskQueue> TaskQueue::Create(already_AddRefed<nsIEventTarget> aTarget,
+ const char* aName,
+ bool aSupportsTailDispatch) {
+ nsCOMPtr<nsIEventTarget> target(std::move(aTarget));
+ RefPtr<TaskQueue> queue =
+ new TaskQueue(do_AddRef(target), aName, aSupportsTailDispatch);
+
+ // If |target| is a TaskQueueTracker, register this TaskQueue with it. It will
+ // be unregistered when the TaskQueue is destroyed or shut down.
+ if (RefPtr<TaskQueueTracker> tracker = do_QueryObject(target)) {
+ MonitorAutoLock lock(queue->mQueueMonitor);
+ queue->mTrackerEntry = MakeUnique<TaskQueueTrackerEntry>(tracker, queue);
+ }
+
+ return queue;
+}
+
+TaskQueue::TaskQueue(already_AddRefed<nsIEventTarget> aTarget,
+ const char* aName, bool aSupportsTailDispatch)
+ : AbstractThread(aSupportsTailDispatch),
+ mTarget(aTarget),
+ mQueueMonitor("TaskQueue::Queue"),
+ mTailDispatcher(nullptr),
+ mIsRunning(false),
+ mIsShutdown(false),
+ mName(aName) {}
+
+TaskQueue::~TaskQueue() {
+  // The TaskQueue should never be freed without a proper shutdown, so all
+  // cleanup tasks should have been dispatched by the time we get here.
+ MOZ_ASSERT(mShutdownTasks.IsEmpty());
+}
+
+NS_IMPL_ADDREF_INHERITED(TaskQueue, SupportsThreadSafeWeakPtr<TaskQueue>)
+NS_IMPL_RELEASE_INHERITED(TaskQueue, SupportsThreadSafeWeakPtr<TaskQueue>)
+NS_IMPL_QUERY_INTERFACE(TaskQueue, nsIDirectTaskDispatcher,
+ nsISerialEventTarget, nsIEventTarget)
+
+TaskDispatcher& TaskQueue::TailDispatcher() {
+ MOZ_ASSERT(IsCurrentThreadIn());
+ MOZ_ASSERT(mTailDispatcher);
+ return *mTailDispatcher;
+}
+
+// Note aRunnable is passed by ref to support conditional ownership transfer.
+// See Dispatch() in TaskQueue.h for more details.
+nsresult TaskQueue::DispatchLocked(nsCOMPtr<nsIRunnable>& aRunnable,
+ uint32_t aFlags, DispatchReason aReason) {
+ mQueueMonitor.AssertCurrentThreadOwns();
+
+ // Continue to allow dispatches after shutdown until the last message has been
+ // processed, at which point no more messages will be accepted.
+ if (mIsShutdown && !mIsRunning) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ AbstractThread* currentThread;
+ if (aReason != TailDispatch && (currentThread = GetCurrent()) &&
+ RequiresTailDispatch(currentThread) &&
+ currentThread->IsTailDispatcherAvailable()) {
+ MOZ_ASSERT(aFlags == NS_DISPATCH_NORMAL,
+ "Tail dispatch doesn't support flags");
+ return currentThread->TailDispatcher().AddTask(this, aRunnable.forget());
+ }
+
+ LogRunnable::LogDispatch(aRunnable);
+ mTasks.Push({std::move(aRunnable), aFlags});
+
+ if (mIsRunning) {
+ return NS_OK;
+ }
+ RefPtr<nsIRunnable> runner(new Runner(this));
+ nsresult rv = mTarget->Dispatch(runner.forget(), aFlags);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Failed to dispatch runnable to run TaskQueue");
+ return rv;
+ }
+ mIsRunning = true;
+
+ return NS_OK;
+}
+
+nsresult TaskQueue::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ NS_ENSURE_ARG(aTask);
+
+ MonitorAutoLock mon(mQueueMonitor);
+ if (mIsShutdown) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ MOZ_ASSERT(!mShutdownTasks.Contains(aTask));
+ mShutdownTasks.AppendElement(aTask);
+ return NS_OK;
+}
+
+nsresult TaskQueue::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ NS_ENSURE_ARG(aTask);
+
+ MonitorAutoLock mon(mQueueMonitor);
+ if (mIsShutdown) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ return mShutdownTasks.RemoveElement(aTask) ? NS_OK : NS_ERROR_UNEXPECTED;
+}
+
+void TaskQueue::AwaitIdle() {
+ MonitorAutoLock mon(mQueueMonitor);
+ AwaitIdleLocked();
+}
+
+void TaskQueue::AwaitIdleLocked() {
+ // Make sure there are no tasks for this queue waiting in the caller's tail
+ // dispatcher.
+ MOZ_ASSERT_IF(AbstractThread::GetCurrent(),
+ !AbstractThread::GetCurrent()->HasTailTasksFor(this));
+
+ mQueueMonitor.AssertCurrentThreadOwns();
+ MOZ_ASSERT(mIsRunning || mTasks.IsEmpty());
+ while (mIsRunning) {
+ mQueueMonitor.Wait();
+ }
+}
+
+void TaskQueue::AwaitShutdownAndIdle() {
+ MOZ_ASSERT(!IsCurrentThreadIn());
+ // Make sure there are no tasks for this queue waiting in the caller's tail
+ // dispatcher.
+ MOZ_ASSERT_IF(AbstractThread::GetCurrent(),
+ !AbstractThread::GetCurrent()->HasTailTasksFor(this));
+
+ MonitorAutoLock mon(mQueueMonitor);
+ while (!mIsShutdown) {
+ mQueueMonitor.Wait();
+ }
+ AwaitIdleLocked();
+}
+RefPtr<ShutdownPromise> TaskQueue::BeginShutdown() {
+ // Dispatch any tasks for this queue waiting in the caller's tail dispatcher,
+ // since this is the last opportunity to do so.
+ if (AbstractThread* currentThread = AbstractThread::GetCurrent()) {
+ currentThread->TailDispatchTasksFor(this);
+ }
+
+ MonitorAutoLock mon(mQueueMonitor);
+ // Dispatch any cleanup tasks to the queue before we put it into full
+ // shutdown.
+ for (auto& task : mShutdownTasks) {
+ nsCOMPtr runnable{task->AsRunnable()};
+ MOZ_ALWAYS_SUCCEEDS(
+ DispatchLocked(runnable, NS_DISPATCH_NORMAL, TailDispatch));
+ }
+ mShutdownTasks.Clear();
+ mIsShutdown = true;
+
+ RefPtr<ShutdownPromise> p = mShutdownPromise.Ensure(__func__);
+ MaybeResolveShutdown();
+ mon.NotifyAll();
+ return p;
+}
+
+void TaskQueue::MaybeResolveShutdown() {
+ mQueueMonitor.AssertCurrentThreadOwns();
+ if (mIsShutdown && !mIsRunning) {
+ mShutdownPromise.ResolveIfExists(true, __func__);
+ // Disconnect from our target as we won't try to dispatch any more events.
+ mTrackerEntry = nullptr;
+ mTarget = nullptr;
+ }
+}
+
+bool TaskQueue::IsEmpty() {
+ MonitorAutoLock mon(mQueueMonitor);
+ return mTasks.IsEmpty();
+}
+
+bool TaskQueue::IsCurrentThreadIn() const {
+  return mRunningThread == PR_GetCurrentThread();
+}
+
+nsresult TaskQueue::Runner::Run() {
+ TaskStruct event;
+ {
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ MOZ_ASSERT(mQueue->mIsRunning);
+ if (mQueue->mTasks.IsEmpty()) {
+ mQueue->mIsRunning = false;
+ mQueue->MaybeResolveShutdown();
+ mon.NotifyAll();
+ return NS_OK;
+ }
+ event = std::move(mQueue->mTasks.FirstElement());
+ mQueue->mTasks.Pop();
+ }
+ MOZ_ASSERT(event.event);
+
+  // Note that dropping the queue monitor before running the task, and taking
+  // it again after the task has run, ensures that the necessary memory fences
+  // are enforced. This means that if the object we're calling wasn't designed
+  // to be threadsafe, it effectively will be, provided we only call it from
+  // this task queue.
+ {
+ AutoTaskGuard g(mQueue);
+ SerialEventTargetGuard tg(mQueue);
+ {
+ LogRunnable::Run log(event.event);
+
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(event.event);
+ event.event->Run();
+
+ // Drop the reference to event. The event will hold a reference to the
+      // object it's calling, and we don't want to keep it alive; it may be
+      // making assumptions about what holds references to it. This is
+      // especially the case if the object is waiting for us to shut down, so
+      // that it can shut down (like in the MediaDecoderStateMachine's
+      // SHUTDOWN case).
+ event.event = nullptr;
+ }
+ }
+
+ {
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ if (mQueue->mTasks.IsEmpty()) {
+ // No more events to run. Exit the task runner.
+ mQueue->mIsRunning = false;
+ mQueue->MaybeResolveShutdown();
+ mon.NotifyAll();
+ return NS_OK;
+ }
+ }
+
+ // There's at least one more event that we can run. Dispatch this Runner
+ // to the target again to ensure it runs again. Note that we don't just
+ // run in a loop here so that we don't hog the target. This means we may
+ // run on another thread next time, but we rely on the memory fences from
+ // mQueueMonitor for thread safety of non-threadsafe tasks.
+ nsresult rv;
+ {
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ rv = mQueue->mTarget->Dispatch(
+ this, mQueue->mTasks.FirstElement().flags | NS_DISPATCH_AT_END);
+ }
+ if (NS_FAILED(rv)) {
+ // Failed to dispatch, shutdown!
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ mQueue->mIsRunning = false;
+ mQueue->mIsShutdown = true;
+ mQueue->MaybeResolveShutdown();
+ mon.NotifyAll();
+ }
+
+ return NS_OK;
+}
+
+//-----------------------------------------------------------------------------
+// nsIDirectTaskDispatcher
+//-----------------------------------------------------------------------------
+
+NS_IMETHODIMP
+TaskQueue::DispatchDirectTask(already_AddRefed<nsIRunnable> aEvent) {
+ if (!IsCurrentThreadIn()) {
+ return NS_ERROR_FAILURE;
+ }
+ mDirectTasks.AddTask(std::move(aEvent));
+ return NS_OK;
+}
+
+NS_IMETHODIMP TaskQueue::DrainDirectTasks() {
+ if (!IsCurrentThreadIn()) {
+ return NS_ERROR_FAILURE;
+ }
+ mDirectTasks.DrainTasks();
+ return NS_OK;
+}
+
+NS_IMETHODIMP TaskQueue::HaveDirectTasks(bool* aValue) {
+ if (!IsCurrentThreadIn()) {
+ return NS_ERROR_FAILURE;
+ }
+
+ *aValue = mDirectTasks.HaveTasks();
+ return NS_OK;
+}
+
+nsTArray<RefPtr<TaskQueue>> TaskQueueTracker::GetAllTrackedTaskQueues() {
+ MutexAutoLock lock(mMutex);
+ nsTArray<RefPtr<TaskQueue>> queues;
+ for (auto* entry : mEntries) {
+ if (auto queue = entry->GetQueue()) {
+ queues.AppendElement(queue);
+ }
+ }
+ return queues;
+}
+
+TaskQueueTracker::~TaskQueueTracker() = default;
+
+} // namespace mozilla
diff --git a/xpcom/threads/TaskQueue.h b/xpcom/threads/TaskQueue.h
new file mode 100644
index 0000000000..b9321866a9
--- /dev/null
+++ b/xpcom/threads/TaskQueue.h
@@ -0,0 +1,281 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TaskQueue_h_
+#define TaskQueue_h_
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/Queue.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/ThreadSafeWeakPtr.h"
+#include "nsIDirectTaskDispatcher.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+typedef MozPromise<bool, bool, false> ShutdownPromise;
+
+class TaskQueueTrackerEntry;
+
+// Abstracts executing runnables in order on an arbitrary event target. The
+// runnables dispatched to the TaskQueue will be executed in the order in which
+// they're received, and are guaranteed to not be executed concurrently.
+// They may be executed on different threads, and a memory barrier is used
+// to make this threadsafe for objects that aren't already threadsafe.
+//
+// Note: since a TaskQueue can also be converted to an nsIEventTarget using
+// WrapAsEventTarget(), it's possible to construct a hierarchy of TaskQueues.
+// Consider these three TaskQueues:
+//
+// TQ1 dispatches to the main thread
+// TQ2 dispatches to TQ1
+// TQ3 dispatches to TQ1
+//
+// This ensures there is only ever a single runnable from the entire chain on
+// the main thread. It also ensures that TQ2 and TQ3 only have a single
+// runnable in TQ1 at any time.
+//
+// This arrangement lets you prioritize work by dispatching runnables directly
+// to TQ1. You can issue many runnables for important work. Meanwhile the TQ2
+// and TQ3 work will always execute at most one runnable and then yield.
+//
+// A TaskQueue does not require explicit shutdown; however, it provides a
+// BeginShutdown() method that places the TaskQueue in a shut-down state and
+// returns a promise that gets resolved once all pending tasks have completed.
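+//
+// A minimal usage sketch (illustrative only; the backing target and the names
+// here are made up, and error handling is omitted):
+//
+//   nsCOMPtr<nsIEventTarget> pool = ...;  // some backing event target
+//   RefPtr<TaskQueue> queue =
+//       TaskQueue::Create(pool.forget(), "ExampleQueue");
+//   MOZ_ALWAYS_SUCCEEDS(queue->Dispatch(NS_NewRunnableFunction(
+//       "ExampleQueue::Work",
+//       [] { /* runs serially with respect to other queue tasks */ })));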
+class TaskQueue final : public AbstractThread,
+ public nsIDirectTaskDispatcher,
+ public SupportsThreadSafeWeakPtr<TaskQueue> {
+ class EventTargetWrapper;
+
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIDIRECTTASKDISPATCHER
+ MOZ_DECLARE_REFCOUNTED_TYPENAME(TaskQueue)
+
+ static RefPtr<TaskQueue> Create(already_AddRefed<nsIEventTarget> aTarget,
+ const char* aName,
+ bool aSupportsTailDispatch = false);
+
+ TaskDispatcher& TailDispatcher() override;
+
+ NS_IMETHOD Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) override {
+ nsCOMPtr<nsIRunnable> runnable = aEvent;
+ {
+ MonitorAutoLock mon(mQueueMonitor);
+ return DispatchLocked(/* passed by ref */ runnable, aFlags,
+ NormalDispatch);
+ }
+    // If the ownership of |runnable| is not transferred in DispatchLocked()
+    // due to dispatch failure, it will be deleted here outside the lock. We
+    // do so since the destructor of the runnable might access TaskQueue and
+    // result in deadlocks.
+ }
+
+ [[nodiscard]] nsresult Dispatch(
+ already_AddRefed<nsIRunnable> aRunnable,
+ DispatchReason aReason = NormalDispatch) override {
+ nsCOMPtr<nsIRunnable> r = aRunnable;
+ {
+ MonitorAutoLock mon(mQueueMonitor);
+ return DispatchLocked(/* passed by ref */ r, NS_DISPATCH_NORMAL, aReason);
+ }
+ // If the ownership of |r| is not transferred in DispatchLocked() due to
+ // dispatch failure, it will be deleted here outside the lock. We do so
+ // since the destructor of the runnable might access TaskQueue and result
+ // in deadlocks.
+ }
+
+ // So we can access nsIEventTarget::Dispatch(nsIRunnable*, uint32_t aFlags)
+ using nsIEventTarget::Dispatch;
+
+ NS_IMETHOD RegisterShutdownTask(nsITargetShutdownTask* aTask) override;
+ NS_IMETHOD UnregisterShutdownTask(nsITargetShutdownTask* aTask) override;
+
+ using CancelPromise = MozPromise<bool, bool, false>;
+
+ // Dispatches a task to cancel any pending DelayedRunnables. Idempotent. Only
+ // dispatches the task on the first call. Creating DelayedRunnables after this
+ // is called will result in assertion failures.
+ RefPtr<CancelPromise> CancelDelayedRunnables();
+
+ // Puts the queue in a shutdown state and returns immediately. The queue will
+ // remain alive at least until all the events are drained, because the Runners
+ // hold a strong reference to the task queue, and one of them is always held
+ // by the target event queue when the task queue is non-empty.
+ //
+ // The returned promise is resolved when the queue goes empty.
+ RefPtr<ShutdownPromise> BeginShutdown();
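+
+  // For example, a caller might chain cleanup off the returned promise (an
+  // illustrative sketch, not code from this file):
+  //
+  //   queue->BeginShutdown()->Then(
+  //       GetMainThreadSerialEventTarget(), __func__,
+  //       [](bool) { /* all pending tasks have run */ },
+  //       [](bool) { /* not expected to reject */ });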
+
+  // Blocks until all tasks finish executing.
+ void AwaitIdle();
+
+ // Blocks until the queue is flagged for shutdown and all tasks have finished
+ // executing.
+ void AwaitShutdownAndIdle();
+
+ bool IsEmpty();
+
+ // Returns true if the current thread is currently running a Runnable in
+ // the task queue.
+ bool IsCurrentThreadIn() const override;
+ using nsISerialEventTarget::IsOnCurrentThread;
+
+ private:
+ friend class SupportsThreadSafeWeakPtr<TaskQueue>;
+
+ TaskQueue(already_AddRefed<nsIEventTarget> aTarget, const char* aName,
+ bool aSupportsTailDispatch);
+
+ virtual ~TaskQueue();
+
+  // Blocks until all tasks finish executing. Called internally by methods
+ // that need to wait until the task queue is idle.
+ // mQueueMonitor must be held.
+ void AwaitIdleLocked();
+
+ nsresult DispatchLocked(nsCOMPtr<nsIRunnable>& aRunnable, uint32_t aFlags,
+ DispatchReason aReason = NormalDispatch);
+
+ void MaybeResolveShutdown();
+
+ nsCOMPtr<nsIEventTarget> mTarget MOZ_GUARDED_BY(mQueueMonitor);
+
+ // Handle for this TaskQueue being registered with our target if it implements
+ // TaskQueueTracker.
+ UniquePtr<TaskQueueTrackerEntry> mTrackerEntry MOZ_GUARDED_BY(mQueueMonitor);
+
+ // Monitor that protects the queue, mIsRunning, mIsShutdown and
+  // mShutdownTasks.
+ Monitor mQueueMonitor;
+
+  struct TaskStruct {
+    nsCOMPtr<nsIRunnable> event;
+    uint32_t flags;
+  };
+
+ // Queue of tasks to run.
+ Queue<TaskStruct> mTasks MOZ_GUARDED_BY(mQueueMonitor);
+
+ // List of tasks to run during shutdown.
+ nsTArray<nsCOMPtr<nsITargetShutdownTask>> mShutdownTasks
+ MOZ_GUARDED_BY(mQueueMonitor);
+
+ // The thread currently running the task queue. We store a reference
+ // to this so that IsCurrentThreadIn() can tell if the current thread
+ // is the thread currently running in the task queue.
+ //
+ // This may be read on any thread, but may only be written on mRunningThread.
+ // The thread can't die while we're running in it, and we only use it for
+ // pointer-comparison with the current thread anyway - so we make it atomic
+ // and don't refcount it.
+ Atomic<PRThread*> mRunningThread;
+
+ // RAII class that gets instantiated for each dispatched task.
+ class AutoTaskGuard {
+ public:
+ explicit AutoTaskGuard(TaskQueue* aQueue)
+ : mQueue(aQueue), mLastCurrentThread(nullptr) {
+ // NB: We don't hold the lock to aQueue here. Don't do anything that
+ // might require it.
+ MOZ_ASSERT(!mQueue->mTailDispatcher);
+ mTaskDispatcher.emplace(aQueue,
+ /* aIsTailDispatcher = */ true);
+ mQueue->mTailDispatcher = mTaskDispatcher.ptr();
+
+ mLastCurrentThread = sCurrentThreadTLS.get();
+ sCurrentThreadTLS.set(aQueue);
+
+ MOZ_ASSERT(mQueue->mRunningThread == nullptr);
+ mQueue->mRunningThread = PR_GetCurrentThread();
+ }
+
+ ~AutoTaskGuard() {
+ mTaskDispatcher->DrainDirectTasks();
+ mTaskDispatcher.reset();
+
+ MOZ_ASSERT(mQueue->mRunningThread == PR_GetCurrentThread());
+ mQueue->mRunningThread = nullptr;
+
+ sCurrentThreadTLS.set(mLastCurrentThread);
+ mQueue->mTailDispatcher = nullptr;
+ }
+
+ private:
+ Maybe<AutoTaskDispatcher> mTaskDispatcher;
+ TaskQueue* mQueue;
+ AbstractThread* mLastCurrentThread;
+ };
+
+ TaskDispatcher* mTailDispatcher;
+
+ // True if we've dispatched an event to the target to execute events from
+ // the queue.
+ bool mIsRunning MOZ_GUARDED_BY(mQueueMonitor);
+
+ // True if we've started our shutdown process.
+ bool mIsShutdown MOZ_GUARDED_BY(mQueueMonitor);
+ MozPromiseHolder<ShutdownPromise> mShutdownPromise
+ MOZ_GUARDED_BY(mQueueMonitor);
+
+ // The name of this TaskQueue. Useful when debugging dispatch failures.
+ const char* const mName;
+
+ SimpleTaskQueue mDirectTasks;
+
+ class Runner : public Runnable {
+ public:
+ explicit Runner(TaskQueue* aQueue)
+ : Runnable("TaskQueue::Runner"), mQueue(aQueue) {}
+ NS_IMETHOD Run() override;
+
+ private:
+ RefPtr<TaskQueue> mQueue;
+ };
+};
+
+#define MOZILLA_TASKQUEUETRACKER_IID \
+ { \
+ 0x765c4b56, 0xd5f6, 0x4a9f, { \
+ 0x91, 0xcf, 0x51, 0x47, 0xb3, 0xc1, 0x7e, 0xa6 \
+ } \
+ }
+
+// XPCOM "interface" which may be implemented by nsIEventTarget implementations
+// which want to keep track of what TaskQueue instances are currently targeting
+// them. This may be used to asynchronously shut down TaskQueues targeting a
+// threadpool or other event target before the threadpool goes away.
+//
+// This explicitly TaskQueue-aware tracker is used instead of
+// `nsITargetShutdownTask` as the operations required to shut down a TaskQueue
+// are asynchronous, which is not a requirement of that interface.
+class TaskQueueTracker : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(MOZILLA_TASKQUEUETRACKER_IID)
+
+ // Get a strong reference to every TaskQueue currently tracked by this
+  // TaskQueueTracker. May be called from any thread.
+ nsTArray<RefPtr<TaskQueue>> GetAllTrackedTaskQueues();
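+  //
+  // For example, a pool might begin draining every queue at shutdown (an
+  // illustrative sketch; `shutdownPromises` is a hypothetical local array):
+  //
+  //   for (const auto& queue : GetAllTrackedTaskQueues()) {
+  //     shutdownPromises.AppendElement(queue->BeginShutdown());
+  //   }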
+
+ protected:
+ virtual ~TaskQueueTracker();
+
+ private:
+ friend class TaskQueueTrackerEntry;
+
+ Mutex mMutex{"TaskQueueTracker"};
+ LinkedList<TaskQueueTrackerEntry> mEntries MOZ_GUARDED_BY(mMutex);
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(TaskQueueTracker, MOZILLA_TASKQUEUETRACKER_IID)
+
+} // namespace mozilla
+
+#endif // TaskQueue_h_
diff --git a/xpcom/threads/ThreadBound.h b/xpcom/threads/ThreadBound.h
new file mode 100644
index 0000000000..4d5e0088b5
--- /dev/null
+++ b/xpcom/threads/ThreadBound.h
@@ -0,0 +1,143 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// A class for values only accessible from a single designated thread.
+
+#ifndef mozilla_ThreadBound_h
+#define mozilla_ThreadBound_h
+
+#include "mozilla/Atomics.h"
+#include "prthread.h"
+
+#include <type_traits>
+
+namespace mozilla {
+
+template <typename T>
+class ThreadBound;
+
+namespace detail {
+
+template <bool Condition, typename T>
+struct AddConstIf {
+ using type = T;
+};
+
+template <typename T>
+struct AddConstIf<true, T> {
+ using type = typename std::add_const<T>::type;
+};
+
+} // namespace detail
+
+// A ThreadBound<T> is a T that can only be accessed by a specific
+// thread. To enforce this rule, the inner T is only accessible
+// through a non-copyable, immovable accessor object.
+// Given a ThreadBound<T> threadBoundData, it can be accessed like so:
+//
+// auto innerData = threadBoundData.Access();
+// innerData->DoStuff();
+//
+// Trying to access a ThreadBound<T> from a different thread will
+// trigger a MOZ_DIAGNOSTIC_ASSERT.
+// The encapsulated T is constructed during the construction of the
+// enclosing ThreadBound<T> by forwarding all of the latter's
+// constructor parameters to the former. A newly constructed
+// ThreadBound<T> is bound to the thread it's constructed in. It's
+// possible to rebind the data to some otherThread by calling
+//
+// threadBoundData.Transfer(otherThread);
+//
+// on the thread that threadBoundData is currently bound to, as long
+// as it's not currently being accessed. (Trying to rebind from
+// another thread or while an accessor exists will trigger an
+// assertion.)
+//
+// Note: A ThreadBound<T> may be destructed from any thread, not just
+// its designated thread at the time the destructor is invoked.
+template <typename T>
+class ThreadBound final {
+ public:
+ template <typename... Args>
+ explicit ThreadBound(Args&&... aArgs)
+ : mData(std::forward<Args>(aArgs)...),
+ mThread(PR_GetCurrentThread()),
+ mAccessCount(0) {}
+
+ ~ThreadBound() { AssertIsNotCurrentlyAccessed(); }
+
+ void Transfer(const PRThread* const aDest) {
+ AssertIsCorrectThread();
+ AssertIsNotCurrentlyAccessed();
+ mThread = aDest;
+ }
+
+ private:
+ T mData;
+
+ // This member is (potentially) accessed by multiple threads and is
+ // thus the first point of synchronization between them.
+ Atomic<const PRThread*, ReleaseAcquire> mThread;
+
+ // In order to support nested accesses (e.g. from different stack
+ // frames) it's necessary to maintain a counter of the existing
+ // accessor. Since it's possible to access a const ThreadBound, the
+ // counter is mutable. It's atomic because accessing it synchronizes
+ // access to mData (see comment in Accessor's constructor).
+ using AccessCountType = Atomic<int, ReleaseAcquire>;
+ mutable AccessCountType mAccessCount;
+
+ public:
+ template <bool IsConst>
+ class MOZ_STACK_CLASS Accessor final {
+ using DataType = typename detail::AddConstIf<IsConst, T>::type;
+
+ public:
+ explicit Accessor(
+ typename detail::AddConstIf<IsConst, ThreadBound>::type& aThreadBound)
+ : mData(aThreadBound.mData), mAccessCount(aThreadBound.mAccessCount) {
+ aThreadBound.AssertIsCorrectThread();
+
+ // This load/store serves as a memory fence that guards mData
+ // against accesses that would trip the thread assertion.
+ // (Otherwise one of the loads in the caller's instruction
+ // stream might be scheduled before the assertion.)
+ ++mAccessCount;
+ }
+
+ Accessor(const Accessor&) = delete;
+ Accessor(Accessor&&) = delete;
+ Accessor& operator=(const Accessor&) = delete;
+ Accessor& operator=(Accessor&&) = delete;
+
+ ~Accessor() { --mAccessCount; }
+
+ DataType* operator->() { return &mData; }
+
+ private:
+ DataType& mData;
+ AccessCountType& mAccessCount;
+ };
+
+ auto Access() { return Accessor<false>{*this}; }
+
+ auto Access() const { return Accessor<true>{*this}; }
+
+ private:
+ bool IsCorrectThread() const { return mThread == PR_GetCurrentThread(); }
+
+ bool IsNotCurrentlyAccessed() const { return mAccessCount == 0; }
+
+#define MOZ_DEFINE_THREAD_BOUND_ASSERT(predicate) \
+ void Assert##predicate() const { MOZ_DIAGNOSTIC_ASSERT(predicate()); }
+
+ MOZ_DEFINE_THREAD_BOUND_ASSERT(IsCorrectThread)
+ MOZ_DEFINE_THREAD_BOUND_ASSERT(IsNotCurrentlyAccessed)
+
+#undef MOZ_DEFINE_THREAD_BOUND_ASSERT
+};
+
+} // namespace mozilla
+
+#endif // mozilla_ThreadBound_h
diff --git a/xpcom/threads/ThreadDelay.cpp b/xpcom/threads/ThreadDelay.cpp
new file mode 100644
index 0000000000..1c38e25510
--- /dev/null
+++ b/xpcom/threads/ThreadDelay.cpp
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ThreadDelay.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/ChaosMode.h"
+
+#if defined(XP_WIN)
+# include <windows.h>
+#else
+# include <unistd.h>
+#endif
+
+namespace mozilla {
+
+void DelayForChaosMode(ChaosFeature aFeature,
+ const uint32_t aMicrosecondLimit) {
+ if (!ChaosMode::isActive(aFeature)) {
+ return;
+ }
+
+ MOZ_ASSERT(aMicrosecondLimit <= 1000);
+#if defined(XP_WIN)
+ // Windows doesn't support sleeping at less than millisecond resolution.
+  // We could spin here, or we could just sleep for one full millisecond, but
+  // that causes heavy delays, so we do nothing for now until we have found a
+  // good way to sleep more precisely here.
+#else
+ const uint32_t duration = ChaosMode::randomUint32LessThan(aMicrosecondLimit);
+ ::usleep(duration);
+#endif
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/ThreadDelay.h b/xpcom/threads/ThreadDelay.h
new file mode 100644
index 0000000000..5cfc116e4d
--- /dev/null
+++ b/xpcom/threads/ThreadDelay.h
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ChaosMode.h"
+
+namespace mozilla {
+
+// Sleep for a random number of microseconds less than aMicrosecondLimit
+// if aFeature is enabled. On Windows, no delay is performed, since sleeping
+// below millisecond resolution isn't supported there.
+void DelayForChaosMode(ChaosFeature aFeature, const uint32_t aMicrosecondLimit);
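+
+// For example (an illustrative sketch), a locking primitive might inject up
+// to 1ms of scheduling jitter before acquiring a lock:
+//
+//   DelayForChaosMode(ChaosFeature::ThreadScheduling, 1000);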
+
+} // namespace mozilla
diff --git a/xpcom/threads/ThreadEventQueue.cpp b/xpcom/threads/ThreadEventQueue.cpp
new file mode 100644
index 0000000000..dec317beef
--- /dev/null
+++ b/xpcom/threads/ThreadEventQueue.cpp
@@ -0,0 +1,324 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ThreadEventQueue.h"
+#include "mozilla/EventQueue.h"
+
+#include "LeakRefPtr.h"
+#include "nsComponentManagerUtils.h"
+#include "nsITargetShutdownTask.h"
+#include "nsIThreadInternal.h"
+#include "nsThreadUtils.h"
+#include "nsThread.h"
+#include "ThreadEventTarget.h"
+#include "mozilla/ProfilerLabels.h"
+#include "mozilla/TaskController.h"
+#include "mozilla/StaticPrefs_threads.h"
+
+using namespace mozilla;
+
+class ThreadEventQueue::NestedSink : public ThreadTargetSink {
+ public:
+ NestedSink(EventQueue* aQueue, ThreadEventQueue* aOwner)
+ : mQueue(aQueue), mOwner(aOwner) {}
+
+ bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority) final {
+ return mOwner->PutEventInternal(std::move(aEvent), aPriority, this);
+ }
+
+ void Disconnect(const MutexAutoLock& aProofOfLock) final { mQueue = nullptr; }
+
+ nsresult RegisterShutdownTask(nsITargetShutdownTask* aTask) final {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ nsresult UnregisterShutdownTask(nsITargetShutdownTask* aTask) final {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
+ if (mQueue) {
+ return mQueue->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ return 0;
+ }
+
+ private:
+ friend class ThreadEventQueue;
+
+ // This is a non-owning reference. It must live at least until Disconnect is
+ // called to clear it out.
+ EventQueue* mQueue;
+ RefPtr<ThreadEventQueue> mOwner;
+};
+
+ThreadEventQueue::ThreadEventQueue(UniquePtr<EventQueue> aQueue,
+ bool aIsMainThread)
+ : mBaseQueue(std::move(aQueue)),
+ mLock("ThreadEventQueue"),
+ mEventsAvailable(mLock, "EventsAvail"),
+ mIsMainThread(aIsMainThread) {
+ if (aIsMainThread) {
+ TaskController::Get()->SetConditionVariable(&mEventsAvailable);
+ }
+}
+
+ThreadEventQueue::~ThreadEventQueue() { MOZ_ASSERT(mNestedQueues.IsEmpty()); }
+
+bool ThreadEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority) {
+ return PutEventInternal(std::move(aEvent), aPriority, nullptr);
+}
+
+bool ThreadEventQueue::PutEventInternal(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority,
+ NestedSink* aSink) {
+ // We want to leak the reference when we fail to dispatch it, so that
+  // we won't release the event on the wrong thread.
+ LeakRefPtr<nsIRunnable> event(std::move(aEvent));
+ nsCOMPtr<nsIThreadObserver> obs;
+
+ {
+ // Check if the runnable wants to override the passed-in priority.
+ // Do this outside the lock, so runnables implemented in JS can QI
+ // (and possibly GC) outside of the lock.
+ if (mIsMainThread) {
+ auto* e = event.get(); // can't do_QueryInterface on LeakRefPtr.
+ if (nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(e)) {
+ uint32_t prio = nsIRunnablePriority::PRIORITY_NORMAL;
+ runnablePrio->GetPriority(&prio);
+ if (prio == nsIRunnablePriority::PRIORITY_CONTROL) {
+ aPriority = EventQueuePriority::Control;
+ } else if (prio == nsIRunnablePriority::PRIORITY_RENDER_BLOCKING) {
+ aPriority = EventQueuePriority::RenderBlocking;
+ } else if (prio == nsIRunnablePriority::PRIORITY_VSYNC) {
+ aPriority = EventQueuePriority::Vsync;
+ } else if (prio == nsIRunnablePriority::PRIORITY_INPUT_HIGH) {
+ aPriority = EventQueuePriority::InputHigh;
+ } else if (prio == nsIRunnablePriority::PRIORITY_MEDIUMHIGH) {
+ aPriority = EventQueuePriority::MediumHigh;
+ } else if (prio == nsIRunnablePriority::PRIORITY_DEFERRED_TIMERS) {
+ aPriority = EventQueuePriority::DeferredTimers;
+ } else if (prio == nsIRunnablePriority::PRIORITY_IDLE) {
+ aPriority = EventQueuePriority::Idle;
+ } else if (prio == nsIRunnablePriority::PRIORITY_LOW) {
+ aPriority = EventQueuePriority::Low;
+ }
+ }
+
+ if (aPriority == EventQueuePriority::Control &&
+ !StaticPrefs::threads_control_event_queue_enabled()) {
+ aPriority = EventQueuePriority::MediumHigh;
+ }
+ }
+
+ MutexAutoLock lock(mLock);
+
+ if (mEventsAreDoomed) {
+ return false;
+ }
+
+ if (aSink) {
+ if (!aSink->mQueue) {
+ return false;
+ }
+
+ aSink->mQueue->PutEvent(event.take(), aPriority, lock);
+ } else {
+ mBaseQueue->PutEvent(event.take(), aPriority, lock);
+ }
+
+ mEventsAvailable.Notify();
+
+  // Make sure to grab the observer before dropping the lock; otherwise the
+ // event that we just placed into the queue could run and eventually delete
+ // this nsThread before the calling thread is scheduled again. We would then
+ // crash while trying to access a dead nsThread.
+ obs = mObserver;
+ }
+
+ if (obs) {
+ obs->OnDispatchedEvent();
+ }
+
+ return true;
+}
+
+already_AddRefed<nsIRunnable> ThreadEventQueue::GetEvent(
+ bool aMayWait, mozilla::TimeDuration* aLastEventDelay) {
+ nsCOMPtr<nsIRunnable> event;
+ {
+ // Scope for lock. When we are about to return, we will exit this
+ // scope so we can do some work after releasing the lock but
+ // before returning.
+ MutexAutoLock lock(mLock);
+
+ for (;;) {
+ const bool noNestedQueue = mNestedQueues.IsEmpty();
+ if (noNestedQueue) {
+ event = mBaseQueue->GetEvent(lock, aLastEventDelay);
+ } else {
+ // We always get events from the topmost queue when there are nested
+ // queues.
+ event =
+ mNestedQueues.LastElement().mQueue->GetEvent(lock, aLastEventDelay);
+ }
+
+ if (event) {
+ break;
+ }
+
+      // No runnable available. Sleep waiting for one if we're supposed to.
+ // Otherwise just go ahead and return null.
+ if (!aMayWait) {
+ break;
+ }
+
+ AUTO_PROFILER_LABEL("ThreadEventQueue::GetEvent::Wait", IDLE);
+ mEventsAvailable.Wait();
+ }
+ }
+
+ return event.forget();
+}
+
+bool ThreadEventQueue::HasPendingEvent() {
+ MutexAutoLock lock(mLock);
+
+ // We always get events from the topmost queue when there are nested queues.
+ if (mNestedQueues.IsEmpty()) {
+ return mBaseQueue->HasReadyEvent(lock);
+ } else {
+ return mNestedQueues.LastElement().mQueue->HasReadyEvent(lock);
+ }
+}
+
+bool ThreadEventQueue::ShutdownIfNoPendingEvents() {
+ MutexAutoLock lock(mLock);
+ if (mNestedQueues.IsEmpty() && mBaseQueue->IsEmpty(lock)) {
+ mEventsAreDoomed = true;
+ return true;
+ }
+ return false;
+}
+
+already_AddRefed<nsISerialEventTarget> ThreadEventQueue::PushEventQueue() {
+ auto queue = MakeUnique<EventQueue>();
+ RefPtr<NestedSink> sink = new NestedSink(queue.get(), this);
+ RefPtr<ThreadEventTarget> eventTarget =
+ new ThreadEventTarget(sink, NS_IsMainThread(), false);
+
+ MutexAutoLock lock(mLock);
+
+ mNestedQueues.AppendElement(NestedQueueItem(std::move(queue), eventTarget));
+ return eventTarget.forget();
+}
+
+void ThreadEventQueue::PopEventQueue(nsIEventTarget* aTarget) {
+ MutexAutoLock lock(mLock);
+
+ MOZ_ASSERT(!mNestedQueues.IsEmpty());
+
+ NestedQueueItem& item = mNestedQueues.LastElement();
+
+ MOZ_ASSERT(aTarget == item.mEventTarget);
+
+ // Disconnect the event target that will be popped.
+ item.mEventTarget->Disconnect(lock);
+
+ EventQueue* prevQueue =
+ mNestedQueues.Length() == 1
+ ? mBaseQueue.get()
+ : mNestedQueues[mNestedQueues.Length() - 2].mQueue.get();
+
+ // Move events from the old queue to the new one.
+ nsCOMPtr<nsIRunnable> event;
+ TimeDuration delay;
+ while ((event = item.mQueue->GetEvent(lock, &delay))) {
+ // preserve the event delay so far
+ prevQueue->PutEvent(event.forget(), EventQueuePriority::Normal, lock,
+ &delay);
+ }
+
+ mNestedQueues.RemoveLastElement();
+}
+
+size_t ThreadEventQueue::SizeOfExcludingThis(
+ mozilla::MallocSizeOf aMallocSizeOf) {
+ size_t n = 0;
+
+ {
+ MutexAutoLock lock(mLock);
+ n += mBaseQueue->SizeOfIncludingThis(aMallocSizeOf);
+ n += mNestedQueues.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ for (auto& queue : mNestedQueues) {
+ n += queue.mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ }
+
+ return SynchronizedEventQueue::SizeOfExcludingThis(aMallocSizeOf) + n;
+}
+
+already_AddRefed<nsIThreadObserver> ThreadEventQueue::GetObserver() {
+ MutexAutoLock lock(mLock);
+ return do_AddRef(mObserver);
+}
+
+already_AddRefed<nsIThreadObserver> ThreadEventQueue::GetObserverOnThread()
+ MOZ_NO_THREAD_SAFETY_ANALYSIS {
+ // only written on this thread
+ return do_AddRef(mObserver);
+}
+
+void ThreadEventQueue::SetObserver(nsIThreadObserver* aObserver) {
+ // Always called from the thread - single writer.
+ nsCOMPtr<nsIThreadObserver> observer = aObserver;
+ {
+ MutexAutoLock lock(mLock);
+ mObserver.swap(observer);
+ }
+ if (NS_IsMainThread()) {
+ TaskController::Get()->SetThreadObserver(aObserver);
+ }
+}
+
+nsresult ThreadEventQueue::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ NS_ENSURE_ARG(aTask);
+ MutexAutoLock lock(mLock);
+ if (mEventsAreDoomed || mShutdownTasksRun) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ MOZ_ASSERT(!mShutdownTasks.Contains(aTask));
+ mShutdownTasks.AppendElement(aTask);
+ return NS_OK;
+}
+
+nsresult ThreadEventQueue::UnregisterShutdownTask(
+ nsITargetShutdownTask* aTask) {
+ NS_ENSURE_ARG(aTask);
+ MutexAutoLock lock(mLock);
+ if (mEventsAreDoomed || mShutdownTasksRun) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ return mShutdownTasks.RemoveElement(aTask) ? NS_OK : NS_ERROR_UNEXPECTED;
+}
+
+void ThreadEventQueue::RunShutdownTasks() {
+ nsTArray<nsCOMPtr<nsITargetShutdownTask>> shutdownTasks;
+ {
+ MutexAutoLock lock(mLock);
+ shutdownTasks = std::move(mShutdownTasks);
+ mShutdownTasks.Clear();
+ mShutdownTasksRun = true;
+ }
+ for (auto& task : shutdownTasks) {
+ task->TargetShutdown();
+ }
+}
+
+ThreadEventQueue::NestedQueueItem::NestedQueueItem(
+ UniquePtr<EventQueue> aQueue, ThreadEventTarget* aEventTarget)
+ : mQueue(std::move(aQueue)), mEventTarget(aEventTarget) {}
diff --git a/xpcom/threads/ThreadEventQueue.h b/xpcom/threads/ThreadEventQueue.h
new file mode 100644
index 0000000000..5244acb3dc
--- /dev/null
+++ b/xpcom/threads/ThreadEventQueue.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ThreadEventQueue_h
+#define mozilla_ThreadEventQueue_h
+
+#include "mozilla/EventQueue.h"
+#include "mozilla/CondVar.h"
+#include "mozilla/SynchronizedEventQueue.h"
+#include "nsCOMPtr.h"
+#include "nsTArray.h"
+
+class nsIEventTarget;
+class nsISerialEventTarget;
+class nsIThreadObserver;
+
+namespace mozilla {
+
+class EventQueue;
+class ThreadEventTarget;
+
+// A ThreadEventQueue implements normal monitor-style synchronization over the
+// EventQueue. It also implements PushEventQueue and PopEventQueue for workers
+// (see the documentation below for an explanation of those). All threads use a
+// ThreadEventQueue as their event queue, although for the main thread this
+// simply forwards events to the TaskController.
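+//
+// A sketch of the nested-queue pattern (illustrative; the surrounding sync
+// loop is elided):
+//
+//   nsCOMPtr<nsISerialEventTarget> nested = threadQueue->PushEventQueue();
+//   // ... dispatch to `nested` and spin an inner event loop ...
+//   threadQueue->PopEventQueue(nested);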
+class ThreadEventQueue final : public SynchronizedEventQueue {
+ public:
+ explicit ThreadEventQueue(UniquePtr<EventQueue> aQueue,
+ bool aIsMainThread = false);
+
+ bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority) final;
+
+ already_AddRefed<nsIRunnable> GetEvent(
+ bool aMayWait, mozilla::TimeDuration* aLastEventDelay = nullptr) final;
+ bool HasPendingEvent() final;
+
+ bool ShutdownIfNoPendingEvents() final;
+
+ void Disconnect(const MutexAutoLock& aProofOfLock) final {}
+
+ nsresult RegisterShutdownTask(nsITargetShutdownTask* aTask) final;
+ nsresult UnregisterShutdownTask(nsITargetShutdownTask* aTask) final;
+ void RunShutdownTasks() final;
+
+ already_AddRefed<nsISerialEventTarget> PushEventQueue() final;
+ void PopEventQueue(nsIEventTarget* aTarget) final;
+
+ already_AddRefed<nsIThreadObserver> GetObserver() final;
+ already_AddRefed<nsIThreadObserver> GetObserverOnThread() final;
+ void SetObserver(nsIThreadObserver* aObserver) final;
+
+ Mutex& MutexRef() { return mLock; }
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) override;
+
+ private:
+ class NestedSink;
+
+ virtual ~ThreadEventQueue();
+
+ bool PutEventInternal(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aPriority, NestedSink* aQueue);
+
+ const UniquePtr<EventQueue> mBaseQueue MOZ_GUARDED_BY(mLock);
+
+ struct NestedQueueItem {
+ UniquePtr<EventQueue> mQueue;
+ RefPtr<ThreadEventTarget> mEventTarget;
+
+ NestedQueueItem(UniquePtr<EventQueue> aQueue,
+ ThreadEventTarget* aEventTarget);
+ };
+
+ nsTArray<NestedQueueItem> mNestedQueues MOZ_GUARDED_BY(mLock);
+
+ Mutex mLock;
+ CondVar mEventsAvailable MOZ_GUARDED_BY(mLock);
+
+ bool mEventsAreDoomed MOZ_GUARDED_BY(mLock) = false;
+ nsCOMPtr<nsIThreadObserver> mObserver MOZ_GUARDED_BY(mLock);
+ nsTArray<nsCOMPtr<nsITargetShutdownTask>> mShutdownTasks
+ MOZ_GUARDED_BY(mLock);
+ bool mShutdownTasksRun MOZ_GUARDED_BY(mLock) = false;
+
+ const bool mIsMainThread;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_ThreadEventQueue_h
diff --git a/xpcom/threads/ThreadEventTarget.cpp b/xpcom/threads/ThreadEventTarget.cpp
new file mode 100644
index 0000000000..d5df2efda1
--- /dev/null
+++ b/xpcom/threads/ThreadEventTarget.cpp
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ThreadEventTarget.h"
+#include "mozilla/ThreadEventQueue.h"
+
+#include "LeakRefPtr.h"
+#include "mozilla/DelayedRunnable.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "mozilla/TimeStamp.h"
+#include "nsComponentManagerUtils.h"
+#include "nsITimer.h"
+#include "nsThreadManager.h"
+#include "nsThreadSyncDispatch.h"
+#include "nsThreadUtils.h"
+#include "ThreadDelay.h"
+
+using namespace mozilla;
+
+#ifdef DEBUG
+// This flag will be set right after XPCOMShutdownThreads has finished but
+// before we continue with other processing. It is exclusively meant to prime
+// the assertion of ThreadEventTarget::Dispatch as early as possible.
+// Please use AppShutdown::IsInOrBeyond(ShutdownPhase::???)
+// elsewhere to check for shutdown phases.
+static mozilla::Atomic<bool, mozilla::SequentiallyConsistent>
+ gXPCOMThreadsShutDownNotified(false);
+#endif
+
+ThreadEventTarget::ThreadEventTarget(ThreadTargetSink* aSink,
+ bool aIsMainThread, bool aBlockDispatch)
+ : mSink(aSink),
+#ifdef DEBUG
+ mIsMainThread(aIsMainThread),
+#endif
+ mBlockDispatch(aBlockDispatch) {
+ mThread = PR_GetCurrentThread();
+}
+
+ThreadEventTarget::~ThreadEventTarget() = default;
+
+void ThreadEventTarget::SetCurrentThread(PRThread* aThread) {
+ mThread = aThread;
+}
+
+void ThreadEventTarget::ClearCurrentThread() { mThread = nullptr; }
+
+NS_IMPL_ISUPPORTS(ThreadEventTarget, nsIEventTarget, nsISerialEventTarget)
+
+NS_IMETHODIMP
+ThreadEventTarget::DispatchFromScript(nsIRunnable* aRunnable, uint32_t aFlags) {
+ return Dispatch(do_AddRef(aRunnable), aFlags);
+}
+
+#ifdef DEBUG
+// static
+void ThreadEventTarget::XPCOMShutdownThreadsNotificationFinished() {
+ gXPCOMThreadsShutDownNotified = true;
+}
+#endif
+
+NS_IMETHODIMP
+ThreadEventTarget::Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) {
+ // We want to leak the reference when we fail to dispatch it, so that
+  // we won't release the event on the wrong thread.
+ LeakRefPtr<nsIRunnable> event(std::move(aEvent));
+ if (NS_WARN_IF(!event)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ NS_ASSERTION(!gXPCOMThreadsShutDownNotified || mIsMainThread ||
+ PR_GetCurrentThread() == mThread,
+ "Dispatch to non-main thread after xpcom-shutdown-threads");
+
+ if (mBlockDispatch && !(aFlags & NS_DISPATCH_IGNORE_BLOCK_DISPATCH)) {
+ MOZ_DIAGNOSTIC_ASSERT(
+ false,
+ "Attempt to dispatch to thread which does not usually process "
+ "dispatched runnables until shutdown");
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ LogRunnable::LogDispatch(event.get());
+
+ NS_ASSERTION((aFlags & (NS_DISPATCH_AT_END |
+ NS_DISPATCH_IGNORE_BLOCK_DISPATCH)) == aFlags,
+ "unexpected dispatch flags");
+ if (!mSink->PutEvent(event.take(), EventQueuePriority::Normal)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ // Delay to encourage the receiving task to run before we do work.
+ DelayForChaosMode(ChaosFeature::TaskDispatching, 1000);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+ThreadEventTarget::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aDelayMs) {
+ nsCOMPtr<nsIRunnable> event = aEvent;
+ NS_ENSURE_TRUE(!!aDelayMs, NS_ERROR_UNEXPECTED);
+
+ RefPtr<DelayedRunnable> r =
+ new DelayedRunnable(do_AddRef(this), event.forget(), aDelayMs);
+ nsresult rv = r->Init();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return Dispatch(r.forget(), NS_DISPATCH_NORMAL);
+}
+
+NS_IMETHODIMP
+ThreadEventTarget::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return mSink->RegisterShutdownTask(aTask);
+}
+
+NS_IMETHODIMP
+ThreadEventTarget::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return mSink->UnregisterShutdownTask(aTask);
+}
+
+NS_IMETHODIMP
+ThreadEventTarget::IsOnCurrentThread(bool* aIsOnCurrentThread) {
+ *aIsOnCurrentThread = IsOnCurrentThread();
+ return NS_OK;
+}
+
+NS_IMETHODIMP_(bool)
+ThreadEventTarget::IsOnCurrentThreadInfallible() {
+ // This method is only going to be called if `mThread` is null, which
+ // only happens when the thread has exited the event loop. Therefore, when
+ // we are called, we can never be on this thread.
+ return false;
+}
diff --git a/xpcom/threads/ThreadEventTarget.h b/xpcom/threads/ThreadEventTarget.h
new file mode 100644
index 0000000000..b78411aa80
--- /dev/null
+++ b/xpcom/threads/ThreadEventTarget.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ThreadEventTarget_h
+#define mozilla_ThreadEventTarget_h
+
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/SynchronizedEventQueue.h" // for ThreadTargetSink
+#include "nsISerialEventTarget.h"
+
+namespace mozilla {
+class DelayedRunnable;
+
+// ThreadEventTarget handles the details of posting an event to a thread. It can
+// be used with any ThreadTargetSink implementation.
+class ThreadEventTarget final : public nsISerialEventTarget {
+ public:
+ ThreadEventTarget(ThreadTargetSink* aSink, bool aIsMainThread,
+ bool aBlockDispatch);
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET_FULL
+
+ // Disconnects the target so that it can no longer post events.
+ void Disconnect(const MutexAutoLock& aProofOfLock) {
+ mSink->Disconnect(aProofOfLock);
+ }
+
+  // Sets the thread for which IsOnCurrentThread returns true to aThread.
+ void SetCurrentThread(PRThread* aThread);
+ // Call ClearCurrentThread() before the PRThread is deleted on thread join.
+ void ClearCurrentThread();
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ size_t n = 0;
+ if (mSink) {
+ n += mSink->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ return aMallocSizeOf(this) + n;
+ }
+
+#ifdef DEBUG
+ static void XPCOMShutdownThreadsNotificationFinished();
+#endif
+
+ private:
+ ~ThreadEventTarget();
+
+ RefPtr<ThreadTargetSink> mSink;
+#ifdef DEBUG
+ const bool mIsMainThread;
+#endif
+ const bool mBlockDispatch;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_ThreadEventTarget_h
diff --git a/xpcom/threads/ThreadLocalVariables.cpp b/xpcom/threads/ThreadLocalVariables.cpp
new file mode 100644
index 0000000000..606c0c6384
--- /dev/null
+++ b/xpcom/threads/ThreadLocalVariables.cpp
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ThreadLocal.h"
+
+// This variable is used to ensure that creating a new URI doesn't put us in
+// an infinite loop.
+MOZ_THREAD_LOCAL(uint32_t) gTlsURLRecursionCount;
+
+void InitThreadLocalVariables() {
+ if (!gTlsURLRecursionCount.init()) {
+ MOZ_CRASH("Could not init gTlsURLRecursionCount");
+ }
+ gTlsURLRecursionCount.set(0);
+}
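+
+// After initialization, callers use the standard MOZ_THREAD_LOCAL accessors,
+// e.g. (an illustrative sketch):
+//
+//   uint32_t depth = gTlsURLRecursionCount.get();
+//   gTlsURLRecursionCount.set(depth + 1);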
diff --git a/xpcom/threads/ThrottledEventQueue.cpp b/xpcom/threads/ThrottledEventQueue.cpp
new file mode 100644
index 0000000000..9e4219b305
--- /dev/null
+++ b/xpcom/threads/ThrottledEventQueue.cpp
@@ -0,0 +1,459 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ThrottledEventQueue.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/CondVar.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Unused.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+// The ThrottledEventQueue is designed with inner and outer objects:
+//
+// XPCOM code base event target
+// | |
+// v v
+// +-------+ +--------+
+// | Outer | +-->|executor|
+// +-------+ | +--------+
+// | | |
+// | +-------+ |
+// +-->| Inner |<--+
+// +-------+
+//
+// Client code references the outer nsIEventTarget which in turn references
+// an inner object, which actually holds the queue of runnables.
+//
+// Whenever the queue is non-empty (and not paused), it keeps an "executor"
+// runnable dispatched to the base event target. Each time the executor is run,
+// it draws the next event from Inner's queue and runs it. If that queue has
+// more events, the executor is dispatched to the base again.
+//
+// The executor holds a strong reference to the Inner object. This means that if
+// the outer object is dereferenced and destroyed, the Inner object will remain
+// live for as long as the executor exists - that is, until the Inner's queue is
+// empty.
+//
+// A Paused ThrottledEventQueue does not enqueue an executor when new events are
+// added. Any executor previously queued on the base event target draws no
+// events from a Paused ThrottledEventQueue, and returns without re-enqueueing
+// itself. Since there is no executor keeping the Inner object alive until its
+// queue is empty, dropping a Paused ThrottledEventQueue may drop the Inner
+// while it still owns events. This is the correct behavior: if there are no
+// references to it, it will never be Resumed, and thus it will never dispatch
+// events again.
+//
+// Resuming a ThrottledEventQueue must dispatch an executor, so calls to Resume
+// are fallible for the same reasons as calls to Dispatch.
+//
+// The xpcom shutdown process drains the main thread's event queue several
+// times, so if a ThrottledEventQueue is being driven by the main thread, it
+// should get emptied out by the time we reach the "eventq shutdown" phase.
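+//
+// A minimal usage sketch (illustrative; error handling omitted and the name
+// is made up):
+//
+//   RefPtr<ThrottledEventQueue> teq = ThrottledEventQueue::Create(
+//       GetMainThreadSerialEventTarget(), "Example TEQ",
+//       nsIRunnablePriority::PRIORITY_NORMAL);
+//   MOZ_ALWAYS_SUCCEEDS(teq->Dispatch(
+//       NS_NewRunnableFunction("Example",
+//                              [] { /* runs on the main thread */ }),
+//       NS_DISPATCH_NORMAL));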
+class ThrottledEventQueue::Inner final : public nsISupports {
+ // The runnable which is dispatched to the underlying base target. Since
+ // we only execute one event at a time we just re-use a single instance
+ // of this class while there are events left in the queue.
+ class Executor final : public Runnable, public nsIRunnablePriority {
+ // The Inner whose runnables we execute. mInner->mExecutor points
+ // to this executor, forming a reference loop.
+ RefPtr<Inner> mInner;
+
+ ~Executor() = default;
+
+ public:
+ explicit Executor(Inner* aInner)
+ : Runnable("ThrottledEventQueue::Inner::Executor"), mInner(aInner) {}
+
+ NS_DECL_ISUPPORTS_INHERITED
+
+ NS_IMETHODIMP
+ Run() override {
+ mInner->ExecuteRunnable();
+ return NS_OK;
+ }
+
+ NS_IMETHODIMP
+ GetPriority(uint32_t* aPriority) override {
+ *aPriority = mInner->mPriority;
+ return NS_OK;
+ }
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHODIMP
+ GetName(nsACString& aName) override { return mInner->CurrentName(aName); }
+#endif
+ };
+
+ mutable Mutex mMutex;
+ mutable CondVar mIdleCondVar MOZ_GUARDED_BY(mMutex);
+
+ // As-of-yet unexecuted runnables queued on this ThrottledEventQueue.
+ //
+ // Used from any thread; protected by mMutex. Signals mIdleCondVar when
+ // emptied.
+ EventQueueSized<64> mEventQueue MOZ_GUARDED_BY(mMutex);
+
+ // The event target we dispatch our events (actually, just our Executor) to.
+ //
+ // Written only during construction. Readable by any thread without locking.
+ const nsCOMPtr<nsISerialEventTarget> mBaseTarget;
+
+ // The Executor that we dispatch to mBaseTarget to draw runnables from our
+ // queue. mExecutor->mInner points to this Inner, forming a reference loop.
+ //
+ // Used from any thread; protected by mMutex.
+ nsCOMPtr<nsIRunnable> mExecutor MOZ_GUARDED_BY(mMutex);
+
+ const char* const mName;
+
+ const uint32_t mPriority;
+
+ // True if this queue is currently paused.
+ // Used from any thread; protected by mMutex.
+ bool mIsPaused MOZ_GUARDED_BY(mMutex);
+
+ explicit Inner(nsISerialEventTarget* aBaseTarget, const char* aName,
+ uint32_t aPriority)
+ : mMutex("ThrottledEventQueue"),
+ mIdleCondVar(mMutex, "ThrottledEventQueue:Idle"),
+ mBaseTarget(aBaseTarget),
+ mName(aName),
+ mPriority(aPriority),
+ mIsPaused(false) {
+ MOZ_ASSERT(mName, "Must pass a valid name!");
+ }
+
+ ~Inner() {
+#ifdef DEBUG
+ MutexAutoLock lock(mMutex);
+
+ // As long as an executor exists, it had better keep us alive, since it's
+ // going to call ExecuteRunnable on us.
+ MOZ_ASSERT(!mExecutor);
+
+ // If we have any events in our queue, there should be an executor queued
+ // for them, and that should have kept us alive. The exception is that, if
+ // we're paused, we don't enqueue an executor.
+ MOZ_ASSERT(mEventQueue.IsEmpty(lock) || IsPaused(lock));
+
+ // Some runnables are only safe to drop on the main thread, so if our queue
+ // isn't empty, we'd better be on the main thread.
+ MOZ_ASSERT_IF(!mEventQueue.IsEmpty(lock), NS_IsMainThread());
+#endif
+ }
+
+ // Make sure an executor has been queued on our base target. If we already
+ // have one, do nothing; otherwise, create and dispatch it.
+ nsresult EnsureExecutor(MutexAutoLock& lock) MOZ_REQUIRES(mMutex) {
+ if (mExecutor) return NS_OK;
+
+ // Note, this creates a ref cycle keeping the inner alive
+ // until the queue is drained.
+ mExecutor = new Executor(this);
+ nsresult rv = mBaseTarget->Dispatch(mExecutor, NS_DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ mExecutor = nullptr;
+ return rv;
+ }
+
+ return NS_OK;
+ }
+
+ nsresult CurrentName(nsACString& aName) {
+ nsCOMPtr<nsIRunnable> event;
+
+#ifdef DEBUG
+ bool currentThread = false;
+ mBaseTarget->IsOnCurrentThread(&currentThread);
+ MOZ_ASSERT(currentThread);
+#endif
+
+ {
+ MutexAutoLock lock(mMutex);
+ event = mEventQueue.PeekEvent(lock);
+ // It is possible that mEventQueue wasn't empty when the executor
+ // was added to the queue, but someone processed events from mEventQueue
+      // before the executor ran; that is why mEventQueue is empty here.
+ if (!event) {
+ aName.AssignLiteral("no runnables left in the ThrottledEventQueue");
+ return NS_OK;
+ }
+ }
+
+ if (nsCOMPtr<nsINamed> named = do_QueryInterface(event)) {
+ nsresult rv = named->GetName(aName);
+ return rv;
+ }
+
+ aName.AssignASCII(mName);
+ return NS_OK;
+ }
+
+ void ExecuteRunnable() {
+ // Any thread
+ nsCOMPtr<nsIRunnable> event;
+
+#ifdef DEBUG
+ bool currentThread = false;
+ mBaseTarget->IsOnCurrentThread(&currentThread);
+ MOZ_ASSERT(currentThread);
+#endif
+
+ {
+ MutexAutoLock lock(mMutex);
+
+ // Normally, a paused queue doesn't dispatch any executor, but we might
+ // have been paused after the executor was already in flight. There's no
+ // way to yank the executor out of the base event target, so we just check
+ // for a paused queue here and return without running anything. We'll
+ // create a new executor when we're resumed.
+ if (IsPaused(lock)) {
+ // Note, this breaks a ref cycle.
+ mExecutor = nullptr;
+ return;
+ }
+
+ // We only dispatch an executor runnable when we know there is something
+ // in the queue, so this should never fail.
+ event = mEventQueue.GetEvent(lock);
+ MOZ_ASSERT(event);
+
+ // If there are more events in the queue, then dispatch the next
+ // executor. We do this now, before running the event, because
+ // the event might spin the event loop and we don't want to stall
+ // the queue.
+ if (mEventQueue.HasReadyEvent(lock)) {
+ // Dispatch the next base target runnable to attempt to execute
+ // the next throttled event. We must do this before executing
+ // the event in case the event spins the event loop.
+ MOZ_ALWAYS_SUCCEEDS(
+ mBaseTarget->Dispatch(mExecutor, NS_DISPATCH_NORMAL));
+ }
+
+ // Otherwise the queue is empty and we can stop dispatching the
+ // executor.
+ else {
+ // Break the Executor::mInner / Inner::mExecutor reference loop.
+ mExecutor = nullptr;
+ mIdleCondVar.NotifyAll();
+ }
+ }
+
+ // Execute the event now that we have unlocked.
+ LogRunnable::Run log(event);
+ Unused << event->Run();
+
+ // To cover the event's destructor code in the LogRunnable log
+ event = nullptr;
+ }
+
+ public:
+ static already_AddRefed<Inner> Create(nsISerialEventTarget* aBaseTarget,
+ const char* aName, uint32_t aPriority) {
+ MOZ_ASSERT(NS_IsMainThread());
+ // FIXME: This assertion only worked when `sCurrentShutdownPhase` was not
+ // being updated.
+ // MOZ_ASSERT(ClearOnShutdown_Internal::sCurrentShutdownPhase ==
+ // ShutdownPhase::NotInShutdown);
+
+ RefPtr<Inner> ref = new Inner(aBaseTarget, aName, aPriority);
+ return ref.forget();
+ }
+
+ bool IsEmpty() const {
+ // Any thread
+ return Length() == 0;
+ }
+
+ uint32_t Length() const {
+ // Any thread
+ MutexAutoLock lock(mMutex);
+ return mEventQueue.Count(lock);
+ }
+
+ already_AddRefed<nsIRunnable> GetEvent() {
+ MutexAutoLock lock(mMutex);
+ return mEventQueue.GetEvent(lock);
+ }
+
+ void AwaitIdle() const {
+ // Any thread, except the main thread or our base target. Blocking the
+ // main thread is forbidden. Blocking the base target is guaranteed to
+ // produce a deadlock.
+ MOZ_ASSERT(!NS_IsMainThread());
+#ifdef DEBUG
+ bool onBaseTarget = false;
+ Unused << mBaseTarget->IsOnCurrentThread(&onBaseTarget);
+ MOZ_ASSERT(!onBaseTarget);
+#endif
+
+ MutexAutoLock lock(mMutex);
+ while (mExecutor || IsPaused(lock)) {
+ mIdleCondVar.Wait();
+ }
+ }
+
+ bool IsPaused() const {
+ MutexAutoLock lock(mMutex);
+ return IsPaused(lock);
+ }
+
+ bool IsPaused(const MutexAutoLock& aProofOfLock) const MOZ_REQUIRES(mMutex) {
+ return mIsPaused;
+ }
+
+ nsresult SetIsPaused(bool aIsPaused) {
+ MutexAutoLock lock(mMutex);
+
+ // If we will be unpaused, and we have events in our queue, make sure we
+ // have an executor queued on the base event target to run them. Do this
+ // before we actually change mIsPaused, since this is fallible.
+ if (!aIsPaused && !mEventQueue.IsEmpty(lock)) {
+ nsresult rv = EnsureExecutor(lock);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ mIsPaused = aIsPaused;
+ return NS_OK;
+ }
+
+ nsresult DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
+ // Any thread
+ nsCOMPtr<nsIRunnable> r = aEvent;
+ return Dispatch(r.forget(), aFlags);
+ }
+
+ nsresult Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) {
+ MOZ_ASSERT(aFlags == NS_DISPATCH_NORMAL || aFlags == NS_DISPATCH_AT_END);
+
+ // Any thread
+ MutexAutoLock lock(mMutex);
+
+ if (!IsPaused(lock)) {
+ // Make sure we have an executor in flight to process events. This is
+ // fallible, so do it first. Our lock will prevent the executor from
+ // accessing the event queue before we add the event below.
+ nsresult rv = EnsureExecutor(lock);
+ if (NS_FAILED(rv)) return rv;
+ }
+
+    // Only add the event to the underlying queue if we are able to
+    // dispatch to our base target.
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ LogRunnable::LogDispatch(event);
+ mEventQueue.PutEvent(event.forget(), EventQueuePriority::Normal, lock);
+ return NS_OK;
+ }
+
+ nsresult DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aDelay) {
+    // The base target may implement this, but we don't. Always fail,
+    // to provide consistent behavior.
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ nsresult RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return mBaseTarget->RegisterShutdownTask(aTask);
+ }
+
+ nsresult UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return mBaseTarget->UnregisterShutdownTask(aTask);
+ }
+
+ bool IsOnCurrentThread() { return mBaseTarget->IsOnCurrentThread(); }
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+};
+
+NS_IMPL_ISUPPORTS(ThrottledEventQueue::Inner, nsISupports);
+
+NS_IMPL_ISUPPORTS_INHERITED(ThrottledEventQueue::Inner::Executor, Runnable,
+ nsIRunnablePriority)
+
+NS_IMPL_ISUPPORTS(ThrottledEventQueue, ThrottledEventQueue, nsIEventTarget,
+ nsISerialEventTarget);
+
+ThrottledEventQueue::ThrottledEventQueue(already_AddRefed<Inner> aInner)
+ : mInner(aInner) {
+ MOZ_ASSERT(mInner);
+}
+
+already_AddRefed<ThrottledEventQueue> ThrottledEventQueue::Create(
+ nsISerialEventTarget* aBaseTarget, const char* aName, uint32_t aPriority) {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aBaseTarget);
+
+ RefPtr<Inner> inner = Inner::Create(aBaseTarget, aName, aPriority);
+
+ RefPtr<ThrottledEventQueue> ref = new ThrottledEventQueue(inner.forget());
+ return ref.forget();
+}
+
+bool ThrottledEventQueue::IsEmpty() const { return mInner->IsEmpty(); }
+
+uint32_t ThrottledEventQueue::Length() const { return mInner->Length(); }
+
+// Get the next runnable from the queue
+already_AddRefed<nsIRunnable> ThrottledEventQueue::GetEvent() {
+ return mInner->GetEvent();
+}
+
+void ThrottledEventQueue::AwaitIdle() const { return mInner->AwaitIdle(); }
+
+nsresult ThrottledEventQueue::SetIsPaused(bool aIsPaused) {
+ return mInner->SetIsPaused(aIsPaused);
+}
+
+bool ThrottledEventQueue::IsPaused() const { return mInner->IsPaused(); }
+
+NS_IMETHODIMP
+ThrottledEventQueue::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
+ return mInner->DispatchFromScript(aEvent, aFlags);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) {
+ return mInner->Dispatch(std::move(aEvent), aFlags);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) {
+ return mInner->DelayedDispatch(std::move(aEvent), aFlags);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return mInner->RegisterShutdownTask(aTask);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return mInner->UnregisterShutdownTask(aTask);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::IsOnCurrentThread(bool* aResult) {
+ *aResult = mInner->IsOnCurrentThread();
+ return NS_OK;
+}
+
+NS_IMETHODIMP_(bool)
+ThrottledEventQueue::IsOnCurrentThreadInfallible() {
+ return mInner->IsOnCurrentThread();
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/ThrottledEventQueue.h b/xpcom/threads/ThrottledEventQueue.h
new file mode 100644
index 0000000000..cf37a10a6d
--- /dev/null
+++ b/xpcom/threads/ThrottledEventQueue.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// nsIEventTarget wrapper for throttling event dispatch.
+
+#ifndef mozilla_ThrottledEventQueue_h
+#define mozilla_ThrottledEventQueue_h
+
+#include "nsISerialEventTarget.h"
+
+#define NS_THROTTLEDEVENTQUEUE_IID \
+ { \
+ 0x8f3cf7dc, 0xfc14, 0x4ad5, { \
+ 0x9f, 0xd5, 0xdb, 0x79, 0xbc, 0xe6, 0xd5, 0x08 \
+ } \
+ }
+
+namespace mozilla {
+
+// A ThrottledEventQueue is an event target that can be used to throttle
+// events being dispatched to another base target. It maintains its
+// own queue of events and only dispatches one at a time to the wrapped
+// target. This can be used to avoid flooding the base target.
+//
+// Flooding is avoided via a very simple principle. Runnables dispatched
+// to the ThrottledEventQueue are only dispatched to the base target
+// one at a time. Only once that runnable has executed will we dispatch
+// the next runnable to the base target. This in effect makes all
+// runnables passing through the ThrottledEventQueue yield to other work
+// on the base target.
+//
+// ThrottledEventQueue keeps runnables waiting to be dispatched to the
+// base in its own internal queue. Code can query the length of this
+// queue using IsEmpty() and Length(). Further, code can implement back
+// pressure by checking the depth of the queue and deciding to stop
+// issuing runnables if it sees that the ThrottledEventQueue is backed up.
+// Code running on other threads could even use AwaitIdle() to block
+// all operation until the ThrottledEventQueue drains.
+//
+// Note, this class is similar to TaskQueue, but also differs in a few
+// ways. First, it is a very simple nsIEventTarget implementation. It
+// does not use the AbstractThread API.
+//
+// In addition, ThrottledEventQueue currently dispatches its next
+// runnable to the base target *before* running the current event. This
+// allows the event code to spin the event loop without stalling the
+// ThrottledEventQueue. In contrast, TaskQueue only dispatches its next
+// runnable after running the current event. That approach is necessary
+// for TaskQueue in order to work with thread pool targets.
+//
+// So, if you are targeting a thread pool you probably want a TaskQueue.
+// If you are targeting a single thread or other non-concurrent event
+// target, you probably want a ThrottledEventQueue.
+//
+// If you drop a ThrottledEventQueue while its queue still has events to be run,
+// they will continue to be dispatched as usual to the base. Only once the last
+// event has run will all the ThrottledEventQueue's memory be freed.
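+//
+// A minimal usage sketch (illustrative only: it assumes a `target`
+// nsISerialEventTarget and uses NS_NewRunnableFunction from nsThreadUtils.h,
+// neither of which comes from this file; Create() must be called on the main
+// thread):
+//
+//   RefPtr<ThrottledEventQueue> queue =
+//       ThrottledEventQueue::Create(target, "ExampleQueue");
+//   queue->Dispatch(NS_NewRunnableFunction("ExampleTask", [] { /* work */ }),
+//                   NS_DISPATCH_NORMAL);
+//   if (queue->Length() > 100) {
+//     // Back pressure: the queue is backed up, so defer further dispatches.
+//   }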
+class ThrottledEventQueue final : public nsISerialEventTarget {
+ class Inner;
+ RefPtr<Inner> mInner;
+
+ explicit ThrottledEventQueue(already_AddRefed<Inner> aInner);
+ ~ThrottledEventQueue() = default;
+
+ public:
+ // Create a ThrottledEventQueue for the given target.
+ static already_AddRefed<ThrottledEventQueue> Create(
+ nsISerialEventTarget* aBaseTarget, const char* aName,
+ uint32_t aPriority = nsIRunnablePriority::PRIORITY_NORMAL);
+
+ // Determine if there are any events pending in the queue.
+ bool IsEmpty() const;
+
+ // Determine how many events are pending in the queue.
+ uint32_t Length() const;
+
+ already_AddRefed<nsIRunnable> GetEvent();
+
+ // Block the current thread until the queue is empty. This may not be called
+ // on the main thread or the base target. The ThrottledEventQueue must not be
+ // paused.
+ void AwaitIdle() const;
+
+ // If |aIsPaused| is true, pause execution of events from this queue. No
+ // events from this queue will be run until this is called with |aIsPaused|
+ // false.
+ //
+ // To un-pause a ThrottledEventQueue, we need to dispatch a runnable to the
+ // underlying event target. That operation may fail, so this method is
+ // fallible as well.
+ //
+  // Note that, although ThrottledEventQueue's behavior is described as queueing
+ // events on the base target, an event queued on a TEQ is never actually moved
+ // to any other queue. What is actually dispatched to the base is an
+ // "executor" event which, when run, removes an event from the TEQ and runs it
+ // immediately. This means that you can pause a TEQ even after the executor
+ // has been queued on the base target, and even so, no events from the TEQ
+ // will run. When the base target gets around to running the executor, the
+ // executor will see that the TEQ is paused, and do nothing.
+ [[nodiscard]] nsresult SetIsPaused(bool aIsPaused);
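+
+  // A sketch of a pause/resume cycle (illustrative only; `queue` stands for
+  // a RefPtr<ThrottledEventQueue> created elsewhere):
+  //
+  //   if (NS_SUCCEEDED(queue->SetIsPaused(true))) {
+  //     // Events dispatched now are queued but will not run.
+  //     if (NS_FAILED(queue->SetIsPaused(false))) {
+  //       // Still paused: un-pausing could not dispatch the executor.
+  //     }
+  //   }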
+
+ // Return true if this ThrottledEventQueue is paused.
+ bool IsPaused() const;
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET_FULL
+
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_THROTTLEDEVENTQUEUE_IID);
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(ThrottledEventQueue, NS_THROTTLEDEVENTQUEUE_IID);
+
+} // namespace mozilla
+
+#endif // mozilla_ThrottledEventQueue_h
diff --git a/xpcom/threads/TimerThread.cpp b/xpcom/threads/TimerThread.cpp
new file mode 100644
index 0000000000..0d672ac7b0
--- /dev/null
+++ b/xpcom/threads/TimerThread.cpp
@@ -0,0 +1,1512 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsTimerImpl.h"
+#include "TimerThread.h"
+
+#include "GeckoProfiler.h"
+#include "nsThreadUtils.h"
+#include "pratom.h"
+
+#include "nsIObserverService.h"
+#include "mozilla/Services.h"
+#include "mozilla/ChaosMode.h"
+#include "mozilla/ArenaAllocator.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/StaticPrefs_timer.h"
+
+#include "mozilla/glean/GleanMetrics.h"
+
+#include <math.h>
+
+using namespace mozilla;
+
+// Bug 1829983 reports an assertion failure that (so far) has only failed once
+// in over a month of the assert existing. This #define enables some additional
+// output that should get printed out if the assert fails again.
+#if defined(XP_WIN) && defined(DEBUG)
+# define HACK_OUTPUT_FOR_BUG_1829983
+#endif
+
+// Uncomment the following line to enable runtime stats during development.
+// #define TIMERS_RUNTIME_STATS
+
+#ifdef TIMERS_RUNTIME_STATS
+// This class gathers durations and displays some basic stats when destroyed.
+// It is intended to be used as a static variable (see `AUTO_TIMERS_STATS`
+// below), to display stats at the end of the program.
+class StaticTimersStats {
+ public:
+ explicit StaticTimersStats(const char* aName) : mName(aName) {}
+
+ ~StaticTimersStats() {
+ // Using unsigned long long for computations and printfs.
+ using ULL = unsigned long long;
+ ULL n = static_cast<ULL>(mCount);
+ if (n == 0) {
+ printf("[%d] Timers stats `%s`: (nothing)\n",
+ int(profiler_current_process_id().ToNumber()), mName);
+ } else if (ULL sumNs = static_cast<ULL>(mSumDurationsNs); sumNs == 0) {
+ printf("[%d] Timers stats `%s`: %llu\n",
+ int(profiler_current_process_id().ToNumber()), mName, n);
+ } else {
+ printf("[%d] Timers stats `%s`: %llu ns / %llu = %llu ns, max %llu ns\n",
+ int(profiler_current_process_id().ToNumber()), mName, sumNs, n,
+ sumNs / n, static_cast<ULL>(mLongestDurationNs));
+ }
+ }
+
+ void AddDurationFrom(TimeStamp aStart) {
+ // Duration between aStart and now, rounded to the nearest nanosecond.
+ DurationNs duration = static_cast<DurationNs>(
+ (TimeStamp::Now() - aStart).ToMicroseconds() * 1000 + 0.5);
+ mSumDurationsNs += duration;
+ ++mCount;
+ // Update mLongestDurationNs if this one is longer.
+ for (;;) {
+ DurationNs longest = mLongestDurationNs;
+ if (MOZ_LIKELY(longest >= duration)) {
+ // This duration is not the longest, nothing to do.
+ break;
+ }
+ if (MOZ_LIKELY(mLongestDurationNs.compareExchange(longest, duration))) {
+ // Successfully updated `mLongestDurationNs` with the new value.
+ break;
+ }
+ // Otherwise someone else just updated `mLongestDurationNs`, we need to
+ // try again by looping.
+ }
+ }
+
+ void AddCount() {
+ MOZ_ASSERT(mSumDurationsNs == 0, "Don't mix counts and durations");
+ ++mCount;
+ }
+
+ private:
+ using DurationNs = uint64_t;
+ using Count = uint32_t;
+
+ Atomic<DurationNs> mSumDurationsNs{0};
+ Atomic<DurationNs> mLongestDurationNs{0};
+ Atomic<Count> mCount{0};
+ const char* mName;
+};
+
+// RAII object that measures its scoped lifetime duration and reports it to a
+// `StaticTimersStats`.
+class MOZ_RAII AutoTimersStats {
+ public:
+ explicit AutoTimersStats(StaticTimersStats& aStats)
+ : mStats(aStats), mStart(TimeStamp::Now()) {}
+
+ ~AutoTimersStats() { mStats.AddDurationFrom(mStart); }
+
+ private:
+ StaticTimersStats& mStats;
+ TimeStamp mStart;
+};
+
+// Macro that should be used to collect basic statistics from measurements of
+// block durations, from where this macro is, until the end of its enclosing
+// scope. The name is used in the static variable name and when displaying stats
+// at the end of the program; another location could use the same name, but its
+// stats will not be combined, so use a different name if these locations
+// should be distinguished.
+# define AUTO_TIMERS_STATS(name) \
+ static ::StaticTimersStats sStat##name(#name); \
+ ::AutoTimersStats autoStat##name(sStat##name);
+
+// This macro only counts the number of times it's used, not durations.
+// Don't mix with AUTO_TIMERS_STATS!
+# define COUNT_TIMERS_STATS(name) \
+ static ::StaticTimersStats sStat##name(#name); \
+ sStat##name.AddCount();
+
+#else // TIMERS_RUNTIME_STATS
+
+# define AUTO_TIMERS_STATS(name)
+# define COUNT_TIMERS_STATS(name)
+
+#endif // TIMERS_RUNTIME_STATS else
+
+NS_IMPL_ISUPPORTS_INHERITED(TimerThread, Runnable, nsIObserver)
+
+TimerThread::TimerThread()
+ : Runnable("TimerThread"),
+ mInitialized(false),
+ mMonitor("TimerThread.mMonitor"),
+ mShutdown(false),
+ mWaiting(false),
+ mNotified(false),
+ mSleeping(false),
+ mAllowedEarlyFiringMicroseconds(0) {}
+
+TimerThread::~TimerThread() {
+ mThread = nullptr;
+
+ NS_ASSERTION(mTimers.IsEmpty(), "Timers remain in TimerThread::~TimerThread");
+
+#if TIMER_THREAD_STATISTICS
+ {
+ MonitorAutoLock lock(mMonitor);
+ PrintStatistics();
+ }
+#endif
+}
+
+namespace {
+
+class TimerObserverRunnable : public Runnable {
+ public:
+ explicit TimerObserverRunnable(nsIObserver* aObserver)
+ : mozilla::Runnable("TimerObserverRunnable"), mObserver(aObserver) {}
+
+ NS_DECL_NSIRUNNABLE
+
+ private:
+ nsCOMPtr<nsIObserver> mObserver;
+};
+
+NS_IMETHODIMP
+TimerObserverRunnable::Run() {
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ if (observerService) {
+ observerService->AddObserver(mObserver, "sleep_notification", false);
+ observerService->AddObserver(mObserver, "wake_notification", false);
+ observerService->AddObserver(mObserver, "suspend_process_notification",
+ false);
+ observerService->AddObserver(mObserver, "resume_process_notification",
+ false);
+ }
+ return NS_OK;
+}
+
+} // namespace
+
+namespace {
+
+// TimerEventAllocator is a thread-safe allocator used only for nsTimerEvents.
+// It's needed to avoid contention over the default allocator lock when
+// firing timer events (see bug 733277). The thread-safety is required because
+// nsTimerEvent objects are allocated on the timer thread, and freed on another
+// thread. Because TimerEventAllocator has its own lock, contention over that
+// lock is limited to the allocation and deallocation of nsTimerEvent objects.
+//
+// Because this is layered over ArenaAllocator, it never shrinks -- even
+// "freed" nsTimerEvents aren't truly freed, they're just put onto a free-list
+// for later recycling. So the amount of memory consumed will always be equal
+// to the high-water mark consumption. But nsTimerEvents are small and it's
+// unusual to have more than a few hundred of them, so this shouldn't be a
+// problem in practice.
+
+class TimerEventAllocator {
+ private:
+ struct FreeEntry {
+ FreeEntry* mNext;
+ };
+
+ ArenaAllocator<4096> mPool MOZ_GUARDED_BY(mMonitor);
+ FreeEntry* mFirstFree MOZ_GUARDED_BY(mMonitor);
+ mozilla::Monitor mMonitor;
+
+ public:
+ TimerEventAllocator()
+ : mPool(), mFirstFree(nullptr), mMonitor("TimerEventAllocator") {}
+
+ ~TimerEventAllocator() = default;
+
+ void* Alloc(size_t aSize);
+ void Free(void* aPtr);
+};
+
+} // namespace
+
+// This is an nsICancelableRunnable because we can dispatch it to Workers and
+// those can be shut down at any time, and in these cases, Cancel() is called
+// instead of Run().
+class nsTimerEvent final : public CancelableRunnable {
+ public:
+ NS_IMETHOD Run() override;
+
+ nsresult Cancel() override {
+ mTimer->Cancel();
+ return NS_OK;
+ }
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHOD GetName(nsACString& aName) override;
+#endif
+
+ explicit nsTimerEvent(already_AddRefed<nsTimerImpl> aTimer,
+ ProfilerThreadId aTimerThreadId)
+ : mozilla::CancelableRunnable("nsTimerEvent"),
+ mTimer(aTimer),
+ mGeneration(mTimer->GetGeneration()),
+ mTimerThreadId(aTimerThreadId) {
+ // Note: We override operator new for this class, and the override is
+ // fallible!
+ sAllocatorUsers++;
+
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug) ||
+ profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
+ mInitTime = TimeStamp::Now();
+ }
+ }
+
+ static void Init();
+ static void Shutdown();
+ static void DeleteAllocatorIfNeeded();
+
+ static void* operator new(size_t aSize) noexcept(true) {
+ return sAllocator->Alloc(aSize);
+ }
+ void operator delete(void* aPtr) {
+ sAllocator->Free(aPtr);
+ sAllocatorUsers--;
+ DeleteAllocatorIfNeeded();
+ }
+
+ already_AddRefed<nsTimerImpl> ForgetTimer() { return mTimer.forget(); }
+
+ private:
+ nsTimerEvent(const nsTimerEvent&) = delete;
+ nsTimerEvent& operator=(const nsTimerEvent&) = delete;
+ nsTimerEvent& operator=(const nsTimerEvent&&) = delete;
+
+ ~nsTimerEvent() {
+ MOZ_ASSERT(!sCanDeleteAllocator || sAllocatorUsers > 0,
+ "This will result in us attempting to deallocate the "
+ "nsTimerEvent allocator twice");
+ }
+
+ TimeStamp mInitTime;
+ RefPtr<nsTimerImpl> mTimer;
+ const int32_t mGeneration;
+ ProfilerThreadId mTimerThreadId;
+
+ static TimerEventAllocator* sAllocator;
+
+ static Atomic<int32_t, SequentiallyConsistent> sAllocatorUsers;
+ static Atomic<bool, SequentiallyConsistent> sCanDeleteAllocator;
+};
+
+TimerEventAllocator* nsTimerEvent::sAllocator = nullptr;
+Atomic<int32_t, SequentiallyConsistent> nsTimerEvent::sAllocatorUsers;
+Atomic<bool, SequentiallyConsistent> nsTimerEvent::sCanDeleteAllocator;
+
+namespace {
+
+void* TimerEventAllocator::Alloc(size_t aSize) {
+ MOZ_ASSERT(aSize == sizeof(nsTimerEvent));
+
+ mozilla::MonitorAutoLock lock(mMonitor);
+
+ void* p;
+ if (mFirstFree) {
+ p = mFirstFree;
+ mFirstFree = mFirstFree->mNext;
+ } else {
+ p = mPool.Allocate(aSize, fallible);
+ }
+
+ return p;
+}
+
+void TimerEventAllocator::Free(void* aPtr) {
+ mozilla::MonitorAutoLock lock(mMonitor);
+
+ FreeEntry* entry = reinterpret_cast<FreeEntry*>(aPtr);
+
+ entry->mNext = mFirstFree;
+ mFirstFree = entry;
+}
+
+} // namespace
+
+struct TimerMarker {
+ static constexpr Span<const char> MarkerTypeName() {
+ return MakeStringSpan("Timer");
+ }
+ static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
+ uint32_t aDelay, uint8_t aType,
+ MarkerThreadId aThreadId, bool aCanceled) {
+ aWriter.IntProperty("delay", aDelay);
+ if (!aThreadId.IsUnspecified()) {
+ // Tech note: If `ToNumber()` returns a uint64_t, the conversion to
+ // int64_t is "implementation-defined" before C++20. This is
+ // acceptable here, because this is a one-way conversion to a unique
+ // identifier that's used to visually separate data by thread on the
+ // front-end.
+ aWriter.IntProperty(
+ "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
+ }
+ if (aCanceled) {
+ aWriter.BoolProperty("canceled", true);
+ // Show a red 'X' as a prefix on the marker chart for canceled timers.
+ aWriter.StringProperty("prefix", "❌");
+ }
+
+ // The string property for the timer type is not written when the type is
+ // one shot, as that's the type used almost all the time, and that would
+ // consume space in the profiler buffer and then in the profile JSON,
+ // getting in the way of capturing long power profiles.
+ // Bug 1815677 might make this cheap to capture.
+ if (aType != nsITimer::TYPE_ONE_SHOT) {
+ if (aType == nsITimer::TYPE_REPEATING_SLACK) {
+ aWriter.StringProperty("ttype", "repeating slack");
+ } else if (aType == nsITimer::TYPE_REPEATING_PRECISE) {
+ aWriter.StringProperty("ttype", "repeating precise");
+ } else if (aType == nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP) {
+ aWriter.StringProperty("ttype", "repeating precise can skip");
+ } else if (aType == nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY) {
+ aWriter.StringProperty("ttype", "repeating slack low priority");
+ } else if (aType == nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY) {
+ aWriter.StringProperty("ttype", "low priority");
+ }
+ }
+ }
+ static MarkerSchema MarkerTypeDisplay() {
+ using MS = MarkerSchema;
+ MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
+ schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
+ schema.AddKeyLabelFormat("ttype", "Timer Type", MS::Format::String);
+ schema.AddKeyLabelFormat("canceled", "Canceled", MS::Format::String);
+ schema.SetChartLabel("{marker.data.prefix} {marker.data.delay}");
+ schema.SetTableLabel(
+ "{marker.name} - {marker.data.prefix} {marker.data.delay}");
+ return schema;
+ }
+};
+
+struct AddRemoveTimerMarker {
+ static constexpr Span<const char> MarkerTypeName() {
+ return MakeStringSpan("AddRemoveTimer");
+ }
+ static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
+ const ProfilerString8View& aTimerName,
+ uint32_t aDelay, MarkerThreadId aThreadId) {
+ aWriter.StringProperty("name", aTimerName);
+ aWriter.IntProperty("delay", aDelay);
+ if (!aThreadId.IsUnspecified()) {
+ // Tech note: If `ToNumber()` returns a uint64_t, the conversion to
+ // int64_t is "implementation-defined" before C++20. This is
+ // acceptable here, because this is a one-way conversion to a unique
+ // identifier that's used to visually separate data by thread on the
+ // front-end.
+ aWriter.IntProperty(
+ "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
+ }
+ }
+ static MarkerSchema MarkerTypeDisplay() {
+ using MS = MarkerSchema;
+ MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
+ schema.AddKeyLabelFormatSearchable("name", "Name", MS::Format::String,
+ MS::Searchable::Searchable);
+ schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
+ schema.SetTableLabel(
+ "{marker.name} - {marker.data.name} - {marker.data.delay}");
+ return schema;
+ }
+};
+
+void nsTimerEvent::Init() { sAllocator = new TimerEventAllocator(); }
+
+void nsTimerEvent::Shutdown() {
+ sCanDeleteAllocator = true;
+ DeleteAllocatorIfNeeded();
+}
+
+void nsTimerEvent::DeleteAllocatorIfNeeded() {
+ if (sCanDeleteAllocator && sAllocatorUsers == 0) {
+ delete sAllocator;
+ sAllocator = nullptr;
+ }
+}
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+NS_IMETHODIMP
+nsTimerEvent::GetName(nsACString& aName) {
+ bool current;
+ MOZ_RELEASE_ASSERT(
+ NS_SUCCEEDED(mTimer->mEventTarget->IsOnCurrentThread(&current)) &&
+ current);
+
+ mTimer->GetName(aName);
+ return NS_OK;
+}
+#endif
+
+NS_IMETHODIMP
+nsTimerEvent::Run() {
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ TimeStamp now = TimeStamp::Now();
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] time between PostTimerEvent() and Fire(): %fms\n", this,
+ (now - mInitTime).ToMilliseconds()));
+ }
+
+ if (profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
+ MutexAutoLock lock(mTimer->mMutex);
+ nsAutoCString name;
+ mTimer->GetName(name, lock);
+ // This adds a marker with the timer name as the marker name, to make it
+ // obvious which timers are being used. This marker will be useful to
+ // understand which timers might be added and firing excessively often.
+ profiler_add_marker(
+ name, geckoprofiler::category::TIMER,
+ MarkerOptions(MOZ_LIKELY(mInitTime)
+ ? MarkerTiming::Interval(
+ mTimer->mTimeout - mTimer->mDelay, mInitTime)
+ : MarkerTiming::IntervalUntilNowFrom(
+ mTimer->mTimeout - mTimer->mDelay),
+ MarkerThreadId(mTimerThreadId)),
+ TimerMarker{}, mTimer->mDelay.ToMilliseconds(), mTimer->mType,
+ MarkerThreadId::CurrentThread(), false);
+ // This marker is meant to help understand the behavior of the timer thread.
+ profiler_add_marker(
+ "PostTimerEvent", geckoprofiler::category::OTHER,
+ MarkerOptions(MOZ_LIKELY(mInitTime)
+ ? MarkerTiming::IntervalUntilNowFrom(mInitTime)
+ : MarkerTiming::InstantNow(),
+ MarkerThreadId(mTimerThreadId)),
+ AddRemoveTimerMarker{}, name, mTimer->mDelay.ToMilliseconds(),
+ MarkerThreadId::CurrentThread());
+ }
+
+ mTimer->Fire(mGeneration);
+
+ return NS_OK;
+}
+
+nsresult TimerThread::Init() {
+ mMonitor.AssertCurrentThreadOwns();
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("TimerThread::Init [%d]\n", mInitialized));
+
+ if (!mInitialized) {
+ nsTimerEvent::Init();
+
+ // We hold on to mThread to keep the thread alive.
+ nsresult rv =
+ NS_NewNamedThread("Timer", getter_AddRefs(mThread), this,
+ {.stackSize = nsIThreadManager::DEFAULT_STACK_SIZE,
+ .blockDispatch = true});
+ if (NS_FAILED(rv)) {
+ mThread = nullptr;
+ } else {
+ RefPtr<TimerObserverRunnable> r = new TimerObserverRunnable(this);
+ if (NS_IsMainThread()) {
+ r->Run();
+ } else {
+ NS_DispatchToMainThread(r);
+ }
+ }
+
+ mInitialized = true;
+ }
+
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+nsresult TimerThread::Shutdown() {
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown begin\n"));
+
+ if (!mThread) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsTArray<RefPtr<nsTimerImpl>> timers;
+ {
+ // lock scope
+ MonitorAutoLock lock(mMonitor);
+
+ mShutdown = true;
+
+ // notify the cond var so that Run() can return
+ if (mWaiting) {
+ mNotified = true;
+ mMonitor.Notify();
+ }
+
+    // We need to copy the contents of the mTimers array to a local array,
+    // because calling a timer's Cancel() (and releasing the timer itself)
+    // must not be done under the lock. The destructor of a callback might
+    // potentially run code that re-enters the same lock, leading to
+    // unexpected behavior or a deadlock.
+ // See bug 422472.
+ timers.SetCapacity(mTimers.Length());
+ for (Entry& entry : mTimers) {
+ if (entry.Value()) {
+ timers.AppendElement(entry.Take());
+ }
+ }
+
+ mTimers.Clear();
+ }
+
+ for (const RefPtr<nsTimerImpl>& timer : timers) {
+ MOZ_ASSERT(timer);
+ timer->Cancel();
+ }
+
+ mThread->Shutdown(); // wait for the thread to die
+
+ nsTimerEvent::Shutdown();
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown end\n"));
+ return NS_OK;
+}
+
+namespace {
+
+struct MicrosecondsToInterval {
+  PRIntervalTime operator[](size_t aUs) const {
+    return PR_MicrosecondsToInterval(aUs);
+ }
+};
+
+struct IntervalComparator {
+ int operator()(PRIntervalTime aInterval) const {
+ return (0 < aInterval) ? -1 : 1;
+ }
+};
+
+} // namespace
+
+#ifdef DEBUG
+void TimerThread::VerifyTimerListConsistency() const {
+ mMonitor.AssertCurrentThreadOwns();
+
+ // Find the first non-canceled timer (and check its cached timeout if we find
+ // it).
+ const size_t timerCount = mTimers.Length();
+ size_t lastNonCanceledTimerIndex = 0;
+ while (lastNonCanceledTimerIndex < timerCount &&
+ !mTimers[lastNonCanceledTimerIndex].Value()) {
+ ++lastNonCanceledTimerIndex;
+ }
+ MOZ_ASSERT(lastNonCanceledTimerIndex == timerCount ||
+ mTimers[lastNonCanceledTimerIndex].Value());
+ MOZ_ASSERT(lastNonCanceledTimerIndex == timerCount ||
+ mTimers[lastNonCanceledTimerIndex].Value()->mTimeout ==
+ mTimers[lastNonCanceledTimerIndex].Timeout());
+
+ // Verify that mTimers is sorted and the cached timeouts are consistent.
+ for (size_t timerIndex = lastNonCanceledTimerIndex + 1;
+ timerIndex < timerCount; ++timerIndex) {
+ if (mTimers[timerIndex].Value()) {
+ MOZ_ASSERT(mTimers[timerIndex].Timeout() ==
+ mTimers[timerIndex].Value()->mTimeout);
+ MOZ_ASSERT(mTimers[timerIndex].Timeout() >=
+ mTimers[lastNonCanceledTimerIndex].Timeout());
+ lastNonCanceledTimerIndex = timerIndex;
+ }
+ }
+}
+#endif
+
+size_t TimerThread::ComputeTimerInsertionIndex(const TimeStamp& timeout) const {
+ mMonitor.AssertCurrentThreadOwns();
+
+ const size_t timerCount = mTimers.Length();
+
+ size_t firstGtIndex = 0;
+ while (firstGtIndex < timerCount &&
+ (!mTimers[firstGtIndex].Value() ||
+ mTimers[firstGtIndex].Timeout() <= timeout)) {
+ ++firstGtIndex;
+ }
+
+ return firstGtIndex;
+}
+
+TimeStamp TimerThread::ComputeWakeupTimeFromTimers() const {
+ mMonitor.AssertCurrentThreadOwns();
+
+  // The timer list should be non-empty and the first timer should always be
+  // non-canceled at this point; we rely on that here.
+ MOZ_ASSERT(!mTimers.IsEmpty());
+ MOZ_ASSERT(mTimers[0].Value());
+
+ // Overview: Find the last timer in the list that can be "bundled" together in
+ // the same wake-up with mTimers[0] and use its timeout as our target wake-up
+ // time.
+
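+  // A worked example with hypothetical numbers: suppose mTimers[0] is due at
+  // T+10ms with a 5ms tolerance and the next timer is due at T+12ms with a
+  // 2ms tolerance. The second timer is accepted because T+12ms <= T+15ms, so
+  // bundleWakeup moves to T+12ms and cutoffTime tightens to
+  // min(T+12ms + 2ms, T+15ms) = T+14ms.
+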
+ // bundleWakeup is when we should wake up in order to be able to fire all of
+ // the timers in our selected bundle. It will always be the timeout of the
+ // last timer in the bundle.
+ TimeStamp bundleWakeup = mTimers[0].Timeout();
+
+ // cutoffTime is the latest that we can wake up for the timers currently
+  // accepted into the bundle. This needs to be updated as we go through the
+  // list because later timers may have stricter delay tolerances.
+ const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
+ StaticPrefs::timer_minimum_firing_delay_tolerance_ms());
+ const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
+ StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
+ TimeStamp cutoffTime =
+ bundleWakeup + ComputeAcceptableFiringDelay(mTimers[0].Delay(),
+ minTimerDelay, maxTimerDelay);
+
+ const size_t timerCount = mTimers.Length();
+ for (size_t entryIndex = 1; entryIndex < timerCount; ++entryIndex) {
+ const Entry& curEntry = mTimers[entryIndex];
+ const nsTimerImpl* curTimer = curEntry.Value();
+ if (!curTimer) {
+ // Canceled timer - skip it
+ continue;
+ }
+
+ const TimeStamp curTimerDue = curEntry.Timeout();
+ if (curTimerDue > cutoffTime) {
+ // Can't include this timer in the bundle - it fires too late.
+ break;
+ }
+
+ // This timer can be included in the bundle. Update bundleWakeup and
+ // cutoffTime.
+ bundleWakeup = curTimerDue;
+ cutoffTime = std::min(
+ curTimerDue + ComputeAcceptableFiringDelay(
+ curEntry.Delay(), minTimerDelay, maxTimerDelay),
+ cutoffTime);
+ MOZ_ASSERT(bundleWakeup <= cutoffTime);
+ }
+
+#ifdef HACK_OUTPUT_FOR_BUG_1829983
+ const bool assertCondition =
+ bundleWakeup - mTimers[0].Timeout() <=
+ ComputeAcceptableFiringDelay(mTimers[0].Delay(), minTimerDelay,
+ maxTimerDelay);
+ if (!assertCondition) {
+ printf_stderr("*** Special TimerThread debug output ***\n");
+ const int64_t tDMin = minTimerDelay.GetValue();
+ const int64_t tDMax = maxTimerDelay.GetValue();
+ printf_stderr("%16llx / %16llx\n", tDMin, tDMax);
+ const size_t l = mTimers.Length();
+ for (size_t i = 0; i < l; ++i) {
+ const Entry& e = mTimers[i];
+ const TimeStamp tS = e.Timeout();
+ const TimeStampValue tSV = tS.GetValue();
+ const TimeDuration d = e.Delay();
+ printf_stderr("[%5zu] %16llx / %16llx / %d / %d / %16llx\n", i, tSV.GTC(),
+ tSV.QPC(), (int)tSV.IsNull(), (int)tSV.HasQPC(),
+ d.GetValue());
+ }
+ }
+#endif
+ MOZ_ASSERT(bundleWakeup - mTimers[0].Timeout() <=
+ ComputeAcceptableFiringDelay(mTimers[0].Delay(), minTimerDelay,
+ maxTimerDelay));
+
+ return bundleWakeup;
+}
+
+TimeDuration TimerThread::ComputeAcceptableFiringDelay(
+ TimeDuration timerDuration, TimeDuration minDelay,
+ TimeDuration maxDelay) const {
+ // Use the timer's duration divided by this value as a base for how much
+ // firing delay a timer can accept. 8 was chosen specifically because it is a
+ // power of two which means that this division turns nicely into a shift.
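+  // For instance, with hypothetical clamp values of minDelay = 4ms and
+  // maxDelay = 40ms: an 80ms timer tolerates 80 / 8 = 10ms of extra firing
+  // delay, a 16ms timer is clamped up to the 4ms minimum, and a 10s timer is
+  // clamped down to the 40ms maximum.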
+ constexpr int64_t timerDurationDivider = 8;
+ static_assert(IsPowerOfTwo(static_cast<uint64_t>(timerDurationDivider)));
+ const TimeDuration tmp = timerDuration / timerDurationDivider;
+ return std::min(std::max(minDelay, tmp), maxDelay);
+}
+
+NS_IMETHODIMP
+TimerThread::Run() {
+ MonitorAutoLock lock(mMonitor);
+
+ mProfilerThreadId = profiler_current_thread_id();
+
+ // We need to know how many microseconds give a positive PRIntervalTime. This
+ // is platform-dependent and we calculate it at runtime, finding a value |v|
+ // such that |PR_MicrosecondsToInterval(v) > 0| and then binary-searching in
+  // the range [0, v) to find the us-to-interval scale.
+ uint32_t usForPosInterval = 1;
+ while (PR_MicrosecondsToInterval(usForPosInterval) == 0) {
+ usForPosInterval <<= 1;
+ }
+
+ size_t usIntervalResolution;
+ BinarySearchIf(MicrosecondsToInterval(), 0, usForPosInterval,
+ IntervalComparator(), &usIntervalResolution);
+ MOZ_ASSERT(PR_MicrosecondsToInterval(usIntervalResolution - 1) == 0);
+ MOZ_ASSERT(PR_MicrosecondsToInterval(usIntervalResolution) == 1);
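+
+  // For example, on a hypothetical platform where PR_MicrosecondsToInterval()
+  // maps [0, 999] to 0 and 1000 to 1, usIntervalResolution ends up as 1000,
+  // so timers may be allowed to fire up to 500us early.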
+
+ // Half of the amount of microseconds needed to get positive PRIntervalTime.
+  // Half of the number of microseconds needed to get a positive PRIntervalTime.
+  // We use this to decide how to round our wait times later.
+ bool forceRunNextTimer = false;
+
+  // Queue for tracking how many timers are fired on each wake-up. We need to
+  // buffer these locally and only send them off to Glean occasionally to avoid
+  // a performance hit.
+ static constexpr size_t kMaxQueuedTimerFired = 128;
+ size_t queuedTimerFiredCount = 0;
+ AutoTArray<uint64_t, kMaxQueuedTimerFired> queuedTimersFiredPerWakeup;
+ queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(kMaxQueuedTimerFired);
+
+ uint64_t timersFiredThisWakeup = 0;
+ while (!mShutdown) {
+    // How long to wait before the next wake-up; TimeDuration::Forever()
+    // means wait indefinitely.
+ TimeDuration waitFor;
+ bool forceRunThisTimer = forceRunNextTimer;
+ forceRunNextTimer = false;
+
+#ifdef DEBUG
+ VerifyTimerListConsistency();
+#endif
+
+ if (mSleeping) {
+ // Sleep for 0.1 seconds while not firing timers.
+ uint32_t milliseconds = 100;
+ if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
+ milliseconds = ChaosMode::randomUint32LessThan(200);
+ }
+ waitFor = TimeDuration::FromMilliseconds(milliseconds);
+ } else {
+ waitFor = TimeDuration::Forever();
+ TimeStamp now = TimeStamp::Now();
+
+#if TIMER_THREAD_STATISTICS
+ if (!mNotified && !mIntendedWakeupTime.IsNull() &&
+ now < mIntendedWakeupTime) {
+ ++mEarlyWakeups;
+ const double earlinessms = (mIntendedWakeupTime - now).ToMilliseconds();
+ mTotalEarlyWakeupTime += earlinessms;
+ }
+#endif
+
+ RemoveLeadingCanceledTimersInternal();
+
+ if (!mTimers.IsEmpty()) {
+ if (now >= mTimers[0].Value()->mTimeout || forceRunThisTimer) {
+ next:
+ // NB: AddRef before the Release under RemoveTimerInternal to avoid
+ // mRefCnt passing through zero, in case all other refs than the one
+ // from mTimers have gone away (the last non-mTimers[i]-ref's Release
+ // must be racing with us, blocked in gThread->RemoveTimer waiting
+          // for TimerThread::mMonitor, under nsTimerImpl::Release).
+
+ RefPtr<nsTimerImpl> timerRef(mTimers[0].Take());
+ RemoveFirstTimerInternal();
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("Timer thread woke up %fms from when it was supposed to\n",
+ fabs((now - timerRef->mTimeout).ToMilliseconds())));
+
+ // We are going to let the call to PostTimerEvent here handle the
+ // release of the timer so that we don't end up releasing the timer
+ // on the TimerThread instead of on the thread it targets.
+ {
+ ++timersFiredThisWakeup;
+ LogTimerEvent::Run run(timerRef.get());
+ PostTimerEvent(timerRef.forget());
+ }
+
+ if (mShutdown) {
+ break;
+ }
+
+ // Update now, as PostTimerEvent plus the locking may have taken a
+ // tick or two, and we may goto next below.
+ now = TimeStamp::Now();
+ }
+ }
+
+ RemoveLeadingCanceledTimersInternal();
+
+ if (!mTimers.IsEmpty()) {
+ TimeStamp timeout = mTimers[0].Value()->mTimeout;
+
+ // Don't wait at all (even for PR_INTERVAL_NO_WAIT) if the next timer
+ // is due now or overdue.
+ //
+ // Note that we can only sleep for integer values of a certain
+ // resolution. We use mAllowedEarlyFiringMicroseconds, calculated
+        // before, to do the optimal rounding (i.e., to decide when an
+        // interval is so small that we should not wait at all).
+ double microseconds = (timeout - now).ToMicroseconds();
+
+        // The mean value of sChaosFractions must be 1 to ensure that the
+        // average of a long sequence of timeouts converges to the actual sum
+        // of their times.
+ static constexpr double sChaosFractions[] = {0.0, 0.25, 0.5, 0.75,
+ 1.0, 1.75, 2.75};
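+        // (These seven values sum to 7.0, so their mean is exactly 1.0.)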
+ if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
+ microseconds *= sChaosFractions[ChaosMode::randomUint32LessThan(
+ ArrayLength(sChaosFractions))];
+ forceRunNextTimer = true;
+ }
+
+ if (microseconds < mAllowedEarlyFiringMicroseconds) {
+ forceRunNextTimer = false;
+ goto next; // round down; execute event now
+ }
+
+ // TECHNICAL NOTE: Determining waitFor (by subtracting |now| from our
+ // desired wake-up time) at this point is not ideal. For one thing, the
+ // |now| that we have at this point is somewhat old. Secondly, there is
+ // quite a bit of code between here and where we actually use waitFor to
+ // request sleep. If I am thinking about this correctly, both of these
+ // will contribute to us requesting more sleep than is actually needed
+ // to wake up at our desired time. We could avoid this problem by only
+ // determining our desired wake-up time here and then calculating the
+ // wait time when we're actually about to sleep.
+ const TimeStamp wakeupTime = ComputeWakeupTimeFromTimers();
+ waitFor = wakeupTime - now;
+
+        // If this were to fail, it would mean that there were more timers
+        // that we should already have fired.
+ MOZ_ASSERT(!waitFor.IsZero());
+
+ if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
+ // If chaos mode is active then mess with the amount of time that we
+ // request to sleep (without changing what we record as our expected
+ // wake-up time). This will simulate unintended early/late wake-ups.
+ const double waitInMs = waitFor.ToMilliseconds();
+ const double chaosWaitInMs =
+ waitInMs * sChaosFractions[ChaosMode::randomUint32LessThan(
+ ArrayLength(sChaosFractions))];
+ waitFor = TimeDuration::FromMilliseconds(chaosWaitInMs);
+ }
+
+ mIntendedWakeupTime = wakeupTime;
+ } else {
+ mIntendedWakeupTime = TimeStamp{};
+ }
+
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ if (waitFor == TimeDuration::Forever())
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("waiting forever\n"));
+ else
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("waiting for %f\n", waitFor.ToMilliseconds()));
+ }
+ }
+
+ {
+ // About to sleep - let's make note of how many timers we processed and
+ // see if we should send out a new batch of telemetry.
+ queuedTimersFiredPerWakeup[queuedTimerFiredCount] = timersFiredThisWakeup;
+ ++queuedTimerFiredCount;
+ if (queuedTimerFiredCount == kMaxQueuedTimerFired) {
+ glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
+ queuedTimersFiredPerWakeup);
+ queuedTimerFiredCount = 0;
+ }
+ }
+
+#if TIMER_THREAD_STATISTICS
+ {
+ size_t bucketIndex = 0;
+ while (bucketIndex < sTimersFiredPerWakeupBucketCount - 1 &&
+ timersFiredThisWakeup >
+ sTimersFiredPerWakeupThresholds[bucketIndex]) {
+ ++bucketIndex;
+ }
+ MOZ_ASSERT(bucketIndex < sTimersFiredPerWakeupBucketCount);
+ ++mTimersFiredPerWakeup[bucketIndex];
+
+ ++mTotalWakeupCount;
+ if (mNotified) {
+ ++mTimersFiredPerNotifiedWakeup[bucketIndex];
+ ++mTotalNotifiedWakeupCount;
+ } else {
+ ++mTimersFiredPerUnnotifiedWakeup[bucketIndex];
+ ++mTotalUnnotifiedWakeupCount;
+ }
+ }
+#endif
+
+ timersFiredThisWakeup = 0;
+
+ mWaiting = true;
+ mNotified = false;
+
+ {
+ AUTO_PROFILER_TRACING_MARKER("TimerThread", "Wait", OTHER);
+ mMonitor.Wait(waitFor);
+ }
+ if (mNotified) {
+ forceRunNextTimer = false;
+ }
+ mWaiting = false;
+ }
+
+ // About to shut down - let's send out the final batch of timers fired counts.
+ if (queuedTimerFiredCount != 0) {
+ queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(queuedTimerFiredCount);
+ glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
+ queuedTimersFiredPerWakeup);
+ }
+
+ return NS_OK;
+}
+
+nsresult TimerThread::AddTimer(nsTimerImpl* aTimer,
+ const MutexAutoLock& aProofOfLock) {
+ MonitorAutoLock lock(mMonitor);
+ AUTO_TIMERS_STATS(TimerThread_AddTimer);
+
+ if (!aTimer->mEventTarget) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsresult rv = Init();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // Awaken the timer thread if:
+ // - This timer needs to fire *before* the Timer Thread is scheduled to wake
+ // up.
+ // AND/OR
+ // - The delay is 0, which is usually meant to be run as soon as possible.
+ // Note: Even if the thread is scheduled to wake up now/soon, on some
+ // systems there could be a significant delay compared to notifying, which
+ // is almost immediate; and some users of 0-delay depend on it being this
+ // fast!
+ const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
+ StaticPrefs::timer_minimum_firing_delay_tolerance_ms());
+ const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
+ StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
+ const TimeDuration firingDelay = ComputeAcceptableFiringDelay(
+ aTimer->mDelay, minTimerDelay, maxTimerDelay);
+ const bool firingBeforeNextWakeup =
+ mIntendedWakeupTime.IsNull() ||
+ (aTimer->mTimeout + firingDelay < mIntendedWakeupTime);
+ const bool wakeUpTimerThread =
+ mWaiting && (firingBeforeNextWakeup || aTimer->mDelay.IsZero());
+
+#if TIMER_THREAD_STATISTICS
+ if (mTotalTimersAdded == 0) {
+ mFirstTimerAdded = TimeStamp::Now();
+ }
+ ++mTotalTimersAdded;
+#endif
+
+ // Add the timer to our list.
+ if (!AddTimerInternal(*aTimer)) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ if (wakeUpTimerThread) {
+ mNotified = true;
+ mMonitor.Notify();
+ }
+
+ if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
+ nsAutoCString name;
+ aTimer->GetName(name, aProofOfLock);
+
+ nsLiteralCString prefix("Anonymous_");
+ profiler_add_marker(
+ "AddTimer", geckoprofiler::category::OTHER,
+ MarkerOptions(MarkerThreadId(mProfilerThreadId),
+ MarkerStack::MaybeCapture(
+ name.Equals("nonfunction:JS") ||
+ StringHead(name, prefix.Length()) == prefix)),
+ AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
+ MarkerThreadId::CurrentThread());
+ }
+
+ return NS_OK;
+}
+
+nsresult TimerThread::RemoveTimer(nsTimerImpl* aTimer,
+ const MutexAutoLock& aProofOfLock) {
+ MonitorAutoLock lock(mMonitor);
+ AUTO_TIMERS_STATS(TimerThread_RemoveTimer);
+
+ // Remove the timer from our array. Tell callers that aTimer was not found
+ // by returning NS_ERROR_NOT_AVAILABLE.
+
+ if (!RemoveTimerInternal(*aTimer)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+#if TIMER_THREAD_STATISTICS
+ ++mTotalTimersRemoved;
+#endif
+
+ // Note: The timer thread is *not* awoken.
+ // The removed-timer entry is just left null, and will be reused (by a new or
+ // re-set timer) or discarded (when the timer thread logic handles non-null
+ // timers around it).
+ // If this was the front timer, and in the unlikely case that its entry is not
+ // soon reused by a re-set timer, the timer thread will wake up at the
+ // previously-scheduled time, but will quickly notice that there is no actual
+ // pending timer, and will restart its wait until the following real timeout.
+
+ if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
+ nsAutoCString name;
+ aTimer->GetName(name, aProofOfLock);
+
+ nsLiteralCString prefix("Anonymous_");
+ // This marker is meant to help understand the behavior of the timer thread.
+ profiler_add_marker(
+ "RemoveTimer", geckoprofiler::category::OTHER,
+ MarkerOptions(MarkerThreadId(mProfilerThreadId),
+ MarkerStack::MaybeCapture(
+ name.Equals("nonfunction:JS") ||
+ StringHead(name, prefix.Length()) == prefix)),
+ AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
+ MarkerThreadId::CurrentThread());
+ // This adds a marker with the timer name as the marker name, to make it
+ // obvious which timers are being used. This marker will be useful to
+ // understand which timers might be added and removed excessively often.
+ profiler_add_marker(name, geckoprofiler::category::TIMER,
+ MarkerOptions(MarkerTiming::IntervalUntilNowFrom(
+ aTimer->mTimeout - aTimer->mDelay),
+ MarkerThreadId(mProfilerThreadId)),
+ TimerMarker{}, aTimer->mDelay.ToMilliseconds(),
+ aTimer->mType, MarkerThreadId::CurrentThread(), true);
+ }
+
+ return NS_OK;
+}
+
+TimeStamp TimerThread::FindNextFireTimeForCurrentThread(TimeStamp aDefault,
+ uint32_t aSearchBound) {
+ MonitorAutoLock lock(mMonitor);
+ AUTO_TIMERS_STATS(TimerThread_FindNextFireTimeForCurrentThread);
+
+ for (const Entry& entry : mTimers) {
+ const nsTimerImpl* timer = entry.Value();
+ if (timer) {
+ if (entry.Timeout() > aDefault) {
+ return aDefault;
+ }
+
+ // Don't yield to timers created with the *_LOW_PRIORITY type.
+ if (!timer->IsLowPriority()) {
+ bool isOnCurrentThread = false;
+ nsresult rv =
+ timer->mEventTarget->IsOnCurrentThread(&isOnCurrentThread);
+ if (NS_SUCCEEDED(rv) && isOnCurrentThread) {
+ return entry.Timeout();
+ }
+ }
+
+ if (aSearchBound == 0) {
+ // Return the currently highest timeout when we reach the bound.
+ // This won't give accurate information if we stop before finding
+        // any timer for the current thread, but at least it won't report
+        // too long an idle period.
+ return timer->mTimeout;
+ }
+
+ --aSearchBound;
+ }
+ }
+
+ // No timers for this thread, return the default.
+ return aDefault;
+}
+
+// This function must be called from within a lock
+// Also: we hold the mutex for the nsTimerImpl.
+bool TimerThread::AddTimerInternal(nsTimerImpl& aTimer) {
+ mMonitor.AssertCurrentThreadOwns();
+ aTimer.mMutex.AssertCurrentThreadOwns();
+ AUTO_TIMERS_STATS(TimerThread_AddTimerInternal);
+ if (mShutdown) {
+ return false;
+ }
+
+ LogTimerEvent::LogDispatch(&aTimer);
+
+ const TimeStamp& timeout = aTimer.mTimeout;
+ const size_t insertionIndex = ComputeTimerInsertionIndex(timeout);
+
+ if (insertionIndex != 0 && !mTimers[insertionIndex - 1].Value()) {
+ // Very common scenario in practice: The timer just before the insertion
+ // point is canceled, overwrite it.
+ AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite_before);
+ mTimers[insertionIndex - 1] = Entry{aTimer};
+ return true;
+ }
+
+ const size_t length = mTimers.Length();
+ if (insertionIndex == length) {
+    // We're at the end (including the case where this is the very first
+    // insertion), so add the new timer at the end.
+ AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_append);
+ return mTimers.AppendElement(Entry{aTimer}, mozilla::fallible);
+ }
+
+ if (!mTimers[insertionIndex].Value()) {
+ // The timer at the insertion point is canceled, overwrite it.
+ AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite);
+ mTimers[insertionIndex] = Entry{aTimer};
+ return true;
+ }
+
+ // The new timer has to be inserted.
+ AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert);
+ // The capacity should be checked first, because if it needs to be increased
+ // and the memory allocation fails, only the new timer should be lost.
+ if (length == mTimers.Capacity() && mTimers[length - 1].Value()) {
+ // We have reached capacity, and the last entry is not canceled, so we
+ // really want to increase the capacity in case the extra slot is required.
+ // To force-expand the array, append a canceled-timer entry with a timestamp
+ // far in the future.
+ // This empty Entry may be used below to receive the moved-from previous
+ // entry. If not, it may be used in a later call if we need to append a new
+ // timer at the end.
+ AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert_expand);
+ if (!mTimers.AppendElement(
+ Entry{mTimers[length - 1].Timeout() +
+ TimeDuration::FromSeconds(365.0 * 24.0 * 60.0 * 60.0)},
+ mozilla::fallible)) {
+ return false;
+ }
+ }
+
+ // Extract the timer at the insertion point, and put the new timer in its
+ // place.
+ Entry extractedEntry = std::exchange(mTimers[insertionIndex], Entry{aTimer});
+ // Following entries can be pushed until we hit a canceled timer or the end.
+ for (size_t i = insertionIndex + 1; i < length; ++i) {
+ Entry& entryRef = mTimers[i];
+ if (!entryRef.Value()) {
+ // Canceled entry, overwrite it with the extracted entry from before.
+ COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_overwrite);
+ entryRef = std::move(extractedEntry);
+ return true;
+ }
+ // Write extracted entry from before, and extract current entry.
+ COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_shifts);
+ std::swap(entryRef, extractedEntry);
+ }
+ // We've reached the end of the list, with still one extracted entry to
+ // re-insert. We've checked the capacity above, this cannot fail.
+ COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_append);
+ mTimers.AppendElement(std::move(extractedEntry));
+ return true;
+}
+
+// This function must be called from within a lock
+// Also: we hold the mutex for the nsTimerImpl.
+bool TimerThread::RemoveTimerInternal(nsTimerImpl& aTimer) {
+ mMonitor.AssertCurrentThreadOwns();
+ aTimer.mMutex.AssertCurrentThreadOwns();
+ AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal);
+ if (!aTimer.IsInTimerThread()) {
+ COUNT_TIMERS_STATS(TimerThread_RemoveTimerInternal_not_in_list);
+ return false;
+ }
+ AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal_in_list);
+ for (auto& entry : mTimers) {
+ if (entry.Value() == &aTimer) {
+ entry.Forget();
+ return true;
+ }
+ }
+ MOZ_ASSERT(!aTimer.IsInTimerThread(),
+ "Not found in the list but it should be!?");
+ return false;
+}
+
+void TimerThread::RemoveLeadingCanceledTimersInternal() {
+ mMonitor.AssertCurrentThreadOwns();
+ AUTO_TIMERS_STATS(TimerThread_RemoveLeadingCanceledTimersInternal);
+
+ size_t toRemove = 0;
+ while (toRemove < mTimers.Length() && !mTimers[toRemove].Value()) {
+ ++toRemove;
+ }
+ mTimers.RemoveElementsAt(0, toRemove);
+}
+
+void TimerThread::RemoveFirstTimerInternal() {
+ mMonitor.AssertCurrentThreadOwns();
+ AUTO_TIMERS_STATS(TimerThread_RemoveFirstTimerInternal);
+ MOZ_ASSERT(!mTimers.IsEmpty());
+ mTimers.RemoveElementAt(0);
+}
+
+void TimerThread::PostTimerEvent(already_AddRefed<nsTimerImpl> aTimerRef) {
+ mMonitor.AssertCurrentThreadOwns();
+ AUTO_TIMERS_STATS(TimerThread_PostTimerEvent);
+
+ RefPtr<nsTimerImpl> timer(aTimerRef);
+
+#if TIMER_THREAD_STATISTICS
+ const double actualFiringDelay =
+ std::max((TimeStamp::Now() - timer->mTimeout).ToMilliseconds(), 0.0);
+ if (mNotified) {
+ ++mTotalTimersFiredNotified;
+ mTotalActualTimerFiringDelayNotified += actualFiringDelay;
+ } else {
+ ++mTotalTimersFiredUnnotified;
+ mTotalActualTimerFiringDelayUnnotified += actualFiringDelay;
+ }
+#endif
+
+ if (!timer->mEventTarget) {
+ NS_ERROR("Attempt to post timer event to NULL event target");
+ return;
+ }
+
+ // XXX we may want to reuse this nsTimerEvent in the case of repeating timers.
+
+ // Since we already addref'd 'timer', we don't need to addref here.
+ // We will release either in ~nsTimerEvent(), or pass the reference back to
+ // the caller. We need to copy the generation number from this timer into the
+ // event, so we can avoid firing a timer that was re-initialized after being
+ // canceled.
+
+ nsCOMPtr<nsIEventTarget> target = timer->mEventTarget;
+
+ void* p = nsTimerEvent::operator new(sizeof(nsTimerEvent));
+ if (!p) {
+ return;
+ }
+ RefPtr<nsTimerEvent> event =
+ ::new (KnownNotNull, p) nsTimerEvent(timer.forget(), mProfilerThreadId);
+
+ nsresult rv;
+ {
+ // We release mMonitor around the Dispatch because if the Dispatch interacts
+ // with the timer API we'll deadlock.
+ MonitorAutoUnlock unlock(mMonitor);
+ rv = target->Dispatch(event, NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ timer = event->ForgetTimer();
+ // We do this to avoid possible deadlock by taking the two locks in a
+ // different order than is used in RemoveTimer(). RemoveTimer() has
+ // aTimer->mMutex first. We use timer.get() to keep static analysis
+      // happy.
+ // NOTE: I'm not sure that any of the below is actually necessary. It
+ // seems to me that the timer that we're trying to fire will have already
+ // been removed prior to this.
+ MutexAutoLock lock1(timer.get()->mMutex);
+ MonitorAutoLock lock2(mMonitor);
+ RemoveTimerInternal(*timer);
+ }
+ }
+}
+
+void TimerThread::DoBeforeSleep() {
+ // Mainthread
+ MonitorAutoLock lock(mMonitor);
+ mSleeping = true;
+}
+
+// Note: wake may be notified without preceding sleep notification
+void TimerThread::DoAfterSleep() {
+ // Mainthread
+ MonitorAutoLock lock(mMonitor);
+ mSleeping = false;
+
+ // Wake up the timer thread to re-process the array to ensure the sleep delay
+  // is correct, and fire any expired timers (perhaps quite a few).
+ mNotified = true;
+ PROFILER_MARKER_UNTYPED("AfterSleep", OTHER,
+ MarkerThreadId(mProfilerThreadId));
+ mMonitor.Notify();
+}
+
+NS_IMETHODIMP
+TimerThread::Observe(nsISupports* /* aSubject */, const char* aTopic,
+ const char16_t* /* aData */) {
+ if (StaticPrefs::timer_ignore_sleep_wake_notifications()) {
+ return NS_OK;
+ }
+
+ if (strcmp(aTopic, "sleep_notification") == 0 ||
+ strcmp(aTopic, "suspend_process_notification") == 0) {
+ DoBeforeSleep();
+ } else if (strcmp(aTopic, "wake_notification") == 0 ||
+ strcmp(aTopic, "resume_process_notification") == 0) {
+ DoAfterSleep();
+ }
+
+ return NS_OK;
+}
+
+uint32_t TimerThread::AllowedEarlyFiringMicroseconds() {
+ MonitorAutoLock lock(mMonitor);
+ return mAllowedEarlyFiringMicroseconds;
+}
+
+#if TIMER_THREAD_STATISTICS
+void TimerThread::PrintStatistics() const {
+ mMonitor.AssertCurrentThreadOwns();
+
+ const TimeStamp freshNow = TimeStamp::Now();
+ const double timeElapsed = mFirstTimerAdded.IsNull()
+ ? 0.0
+ : (freshNow - mFirstTimerAdded).ToSeconds();
+ printf_stderr("TimerThread Stats (Total time %8.2fs)\n", timeElapsed);
+
+ printf_stderr("Added: %6llu Removed: %6llu Fired: %6llu\n", mTotalTimersAdded,
+ mTotalTimersRemoved,
+ mTotalTimersFiredNotified + mTotalTimersFiredUnnotified);
+
+ auto PrintTimersFiredBucket =
+ [](const AutoTArray<size_t, sTimersFiredPerWakeupBucketCount>& buckets,
+ const size_t wakeupCount, const size_t timersFiredCount,
+ const double totalTimerDelay, const char* label) {
+ printf_stderr("%s : [", label);
+ for (size_t bucketVal : buckets) {
+ printf_stderr(" %5llu", bucketVal);
+ }
+ printf_stderr(
+ " ] Wake-ups/timer %6llu / %6llu (%7.4f) Avg Timer Delay %7.4f\n",
+ wakeupCount, timersFiredCount,
+ static_cast<double>(wakeupCount) / timersFiredCount,
+ totalTimerDelay / timersFiredCount);
+ };
+
+ printf_stderr("Wake-ups:\n");
+ PrintTimersFiredBucket(
+ mTimersFiredPerWakeup, mTotalWakeupCount,
+ mTotalTimersFiredNotified + mTotalTimersFiredUnnotified,
+ mTotalActualTimerFiringDelayNotified +
+ mTotalActualTimerFiringDelayUnnotified,
+ "Total ");
+ PrintTimersFiredBucket(mTimersFiredPerNotifiedWakeup,
+ mTotalNotifiedWakeupCount, mTotalTimersFiredNotified,
+ mTotalActualTimerFiringDelayNotified, "Notified ");
+ PrintTimersFiredBucket(mTimersFiredPerUnnotifiedWakeup,
+ mTotalUnnotifiedWakeupCount,
+ mTotalTimersFiredUnnotified,
+ mTotalActualTimerFiringDelayUnnotified, "Unnotified ");
+
+ printf_stderr("Early Wake-ups: %6llu Avg: %7.4fms\n", mEarlyWakeups,
+ mTotalEarlyWakeupTime / mEarlyWakeups);
+}
+#endif
+
+/* This nsReadOnlyTimer class is used for the values returned by the
+ * TimerThread::GetTimers method.
+ * It is not possible to return a strong reference to the nsTimerImpl
+ * instance (that could extend the lifetime of the timer and cause it to fire
+ * a callback pointing to already freed memory) or a weak reference
+ * (nsSupportsWeakReference doesn't support freeing the referee on a thread
+ * that isn't the thread that owns the weak reference), so instead the timer
+ * name, delay and type are copied to a new object. */
+class nsReadOnlyTimer final : public nsITimer {
+ public:
+ explicit nsReadOnlyTimer(const nsACString& aName, uint32_t aDelay,
+ uint32_t aType)
+ : mName(aName), mDelay(aDelay), mType(aType) {}
+ NS_DECL_ISUPPORTS
+
+ NS_IMETHOD Init(nsIObserver* aObserver, uint32_t aDelayInMs,
+ uint32_t aType) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD InitWithCallback(nsITimerCallback* aCallback, uint32_t aDelayInMs,
+ uint32_t aType) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD InitHighResolutionWithCallback(nsITimerCallback* aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD Cancel(void) override { return NS_ERROR_NOT_IMPLEMENTED; }
+ NS_IMETHOD InitWithNamedFuncCallback(nsTimerCallbackFunc aCallback,
+ void* aClosure, uint32_t aDelay,
+ uint32_t aType,
+ const char* aName) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD InitHighResolutionWithNamedFuncCallback(
+ nsTimerCallbackFunc aCallback, void* aClosure,
+ const mozilla::TimeDuration& aDelay, uint32_t aType,
+ const char* aName) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ NS_IMETHOD GetName(nsACString& aName) override {
+ aName = mName;
+ return NS_OK;
+ }
+ NS_IMETHOD GetDelay(uint32_t* aDelay) override {
+ *aDelay = mDelay;
+ return NS_OK;
+ }
+ NS_IMETHOD SetDelay(uint32_t aDelay) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetType(uint32_t* aType) override {
+ *aType = mType;
+ return NS_OK;
+ }
+ NS_IMETHOD SetType(uint32_t aType) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetClosure(void** aClosure) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetCallback(nsITimerCallback** aCallback) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetTarget(nsIEventTarget** aTarget) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD SetTarget(nsIEventTarget* aTarget) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ NS_IMETHOD GetAllowedEarlyFiringMicroseconds(
+ uint32_t* aAllowedEarlyFiringMicroseconds) override {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) override {
+ return sizeof(*this);
+ }
+
+ private:
+ nsCString mName;
+ uint32_t mDelay;
+ uint32_t mType;
+ ~nsReadOnlyTimer() = default;
+};
+
+NS_IMPL_ISUPPORTS(nsReadOnlyTimer, nsITimer)
+
+nsresult TimerThread::GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal) {
+ nsTArray<RefPtr<nsTimerImpl>> timers;
+ {
+ MonitorAutoLock lock(mMonitor);
+ for (const auto& entry : mTimers) {
+ nsTimerImpl* timer = entry.Value();
+ if (!timer) {
+ continue;
+ }
+ timers.AppendElement(timer);
+ }
+ }
+
+ for (nsTimerImpl* timer : timers) {
+ nsAutoCString name;
+ timer->GetName(name);
+
+ uint32_t delay;
+ timer->GetDelay(&delay);
+
+ uint32_t type;
+ timer->GetType(&type);
+
+ aRetVal.AppendElement(new nsReadOnlyTimer(name, delay, type));
+ }
+
+ return NS_OK;
+}
diff --git a/xpcom/threads/TimerThread.h b/xpcom/threads/TimerThread.h
new file mode 100644
index 0000000000..ec138efca6
--- /dev/null
+++ b/xpcom/threads/TimerThread.h
@@ -0,0 +1,243 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TimerThread_h___
+#define TimerThread_h___
+
+#include "nsIObserver.h"
+#include "nsIRunnable.h"
+#include "nsIThread.h"
+
+#include "nsTimerImpl.h"
+#include "nsThreadUtils.h"
+
+#include "nsTArray.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/ProfilerUtils.h"
+#include "mozilla/UniquePtr.h"
+
+#include <algorithm>
+
+namespace mozilla {
+class TimeStamp;
+} // namespace mozilla
+
+// Enable this to compute lots of interesting statistics and print them out when
+// PrintStatistics() is called.
+#define TIMER_THREAD_STATISTICS 0
+
+class TimerThread final : public mozilla::Runnable, public nsIObserver {
+ public:
+ typedef mozilla::Monitor Monitor;
+ typedef mozilla::MutexAutoLock MutexAutoLock;
+ typedef mozilla::TimeStamp TimeStamp;
+ typedef mozilla::TimeDuration TimeDuration;
+
+ TimerThread();
+
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIRUNNABLE
+ NS_DECL_NSIOBSERVER
+
+ nsresult Shutdown();
+
+ nsresult AddTimer(nsTimerImpl* aTimer, const MutexAutoLock& aProofOfLock)
+ MOZ_REQUIRES(aTimer->mMutex);
+ nsresult RemoveTimer(nsTimerImpl* aTimer, const MutexAutoLock& aProofOfLock)
+ MOZ_REQUIRES(aTimer->mMutex);
+ // Considering only the first 'aSearchBound' timers (in firing order), returns
+ // the timeout of the first non-low-priority timer, on the current thread,
+ // that will fire before 'aDefault'. If no such timer exists, 'aDefault' is
+ // returned.
+ TimeStamp FindNextFireTimeForCurrentThread(TimeStamp aDefault,
+ uint32_t aSearchBound);
+
+ void DoBeforeSleep();
+ void DoAfterSleep();
+
+ bool IsOnTimerThread() const { return mThread->IsOnCurrentThread(); }
+
+ uint32_t AllowedEarlyFiringMicroseconds();
+ nsresult GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal);
+
+ private:
+ ~TimerThread();
+
+ bool mInitialized;
+
+ // These internal helper methods must be called while mMonitor is held.
+ // AddTimerInternal returns false if the insertion failed.
+ bool AddTimerInternal(nsTimerImpl& aTimer) MOZ_REQUIRES(mMonitor);
+ bool RemoveTimerInternal(nsTimerImpl& aTimer)
+ MOZ_REQUIRES(mMonitor, aTimer.mMutex);
+ void RemoveLeadingCanceledTimersInternal() MOZ_REQUIRES(mMonitor);
+ void RemoveFirstTimerInternal() MOZ_REQUIRES(mMonitor);
+ nsresult Init() MOZ_REQUIRES(mMonitor);
+
+ void PostTimerEvent(already_AddRefed<nsTimerImpl> aTimerRef)
+ MOZ_REQUIRES(mMonitor);
+
+ nsCOMPtr<nsIThread> mThread;
+ // Lock ordering requirements:
+ // (optional) ThreadWrapper::sMutex ->
+ // (optional) nsTimerImpl::mMutex ->
+ // TimerThread::mMonitor
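+ // (That is: code may take mMonitor while already holding a timer's
+ // nsTimerImpl::mMutex -- as AddTimer()/RemoveTimer() do via their
+ // proof-of-lock parameters -- but must never take a timer's mutex while
+ // holding mMonitor.)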
+ Monitor mMonitor;
+
+ bool mShutdown MOZ_GUARDED_BY(mMonitor);
+ bool mWaiting MOZ_GUARDED_BY(mMonitor);
+ bool mNotified MOZ_GUARDED_BY(mMonitor);
+ bool mSleeping MOZ_GUARDED_BY(mMonitor);
+
+ class Entry final {
+ public:
+ explicit Entry(nsTimerImpl& aTimerImpl)
+ : mTimeout(aTimerImpl.mTimeout),
+ mDelay(aTimerImpl.mDelay),
+ mTimerImpl(&aTimerImpl) {
+ aTimerImpl.SetIsInTimerThread(true);
+ }
+
+ // Create an already-canceled entry with the given timeout.
+ explicit Entry(TimeStamp aTimeout)
+ : mTimeout(std::move(aTimeout)), mTimerImpl(nullptr) {}
+
+ // Don't allow copies, otherwise which one would manage `IsInTimerThread`?
+ Entry(const Entry&) = delete;
+ Entry& operator=(const Entry&) = delete;
+
+ // Move-only.
+ Entry(Entry&&) = default;
+ Entry& operator=(Entry&&) = default;
+
+ ~Entry() {
+ if (mTimerImpl) {
+ mTimerImpl->mMutex.AssertCurrentThreadOwns();
+ mTimerImpl->SetIsInTimerThread(false);
+ }
+ }
+
+ nsTimerImpl* Value() const { return mTimerImpl; }
+
+ void Forget() {
+ if (MOZ_UNLIKELY(!mTimerImpl)) {
+ return;
+ }
+ mTimerImpl->mMutex.AssertCurrentThreadOwns();
+ mTimerImpl->SetIsInTimerThread(false);
+ mTimerImpl = nullptr;
+ }
+
+ // Called with the Monitor held, but not the TimerImpl's mutex
+ already_AddRefed<nsTimerImpl> Take() {
+ if (MOZ_LIKELY(mTimerImpl)) {
+ MOZ_ASSERT(mTimerImpl->IsInTimerThread());
+ mTimerImpl->SetIsInTimerThread(false);
+ }
+ return mTimerImpl.forget();
+ }
+
+ const TimeStamp& Timeout() const { return mTimeout; }
+ const TimeDuration& Delay() const { return mDelay; }
+
+ private:
+ // These values are simply cached from the timer. Keeping them here is good
+ // for cache usage and allows us to avoid worrying about locking conflicts
+ // with the timer.
+ TimeStamp mTimeout;
+ TimeDuration mDelay;
+
+ RefPtr<nsTimerImpl> mTimerImpl;
+ };
+
+ // Computes and returns the index in mTimers at which a new timer with the
+ // specified timeout should be inserted in order to maintain "sorted" order.
+ size_t ComputeTimerInsertionIndex(const TimeStamp& timeout) const
+ MOZ_REQUIRES(mMonitor);
+
+ // Computes and returns when we should next try to wake up in order to handle
+ // the triggering of the timers in mTimers. Currently this is very simple and
+ // we always just plan to wake up for the next timer in the list. In the
+ // future this will be more sophisticated.
+ TimeStamp ComputeWakeupTimeFromTimers() const MOZ_REQUIRES(mMonitor);
+
+ // Computes how late a timer can acceptably fire.
+ // timerDuration is the duration of the timer whose delay we are calculating.
+ // Longer timers can tolerate longer firing delays.
+ // minDelay is an amount by which any timer can be delayed.
+ // This function will never return a value smaller than minDelay (unless this
+ // conflicts with maxDelay). maxDelay is the upper limit on the amount by
+ // which we will ever delay any timer. Takes precedence over minDelay if there
+ // is a conflict. (Zero will effectively disable timer coalescing.)
+ TimeDuration ComputeAcceptableFiringDelay(TimeDuration timerDuration,
+ TimeDuration minDelay,
+ TimeDuration maxDelay) const;
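+ // (A sketch of the clamping described above; `kGrowthFactor` is hypothetical
+ // and the real computation lives in TimerThread.cpp:
+ // return std::min(std::max(timerDuration * kGrowthFactor, minDelay),
+ // maxDelay);
+ // Written this way, maxDelay wins whenever the two bounds conflict.)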
+
+#ifdef DEBUG
+ // Checks mTimers to see if any entries are out of order or any cached
+ // timeouts are incorrect and will assert if any inconsistency is found. Has
+ // no side effects other than asserting so has no use in non-DEBUG builds.
+ void VerifyTimerListConsistency() const MOZ_REQUIRES(mMonitor);
+#endif
+
+ // mTimers is maintained in a "pseudo-sorted" order wrt the timeouts.
+ // Specifically, mTimers is sorted according to the timeouts *if you ignore the
+ // canceled entries* (those whose mTimerImpl is nullptr). Notably this means
+ // that you cannot use a binary search on this list.
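+ // (For example, the raw timeout sequence [3, 9, 4, 5] is legal when the "9"
+ // entry is canceled: ignoring it leaves the sorted [3, 4, 5]. A binary search
+ // pivoting on the stale "9" could take a wrong turn, so searches over this
+ // array must be linear, skipping canceled entries.)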
+ nsTArray<Entry> mTimers MOZ_GUARDED_BY(mMonitor);
+
+ // Set only at the start of the thread's Run():
+ uint32_t mAllowedEarlyFiringMicroseconds MOZ_GUARDED_BY(mMonitor);
+
+ ProfilerThreadId mProfilerThreadId MOZ_GUARDED_BY(mMonitor);
+
+ // Time at which we were intending to wake up the last time that we slept.
+ // Is "null" if we have never slept or if our last sleep was "forever".
+ TimeStamp mIntendedWakeupTime;
+
+#if TIMER_THREAD_STATISTICS
+ static constexpr size_t sTimersFiredPerWakeupBucketCount = 16;
+ static inline constexpr std::array<size_t, sTimersFiredPerWakeupBucketCount>
+ sTimersFiredPerWakeupThresholds = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 20, 30, 40, 50, 70, (size_t)(-1)};
+
+ mutable AutoTArray<size_t, sTimersFiredPerWakeupBucketCount>
+ mTimersFiredPerWakeup MOZ_GUARDED_BY(mMonitor) = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ mutable AutoTArray<size_t, sTimersFiredPerWakeupBucketCount>
+ mTimersFiredPerUnnotifiedWakeup MOZ_GUARDED_BY(mMonitor) = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ mutable AutoTArray<size_t, sTimersFiredPerWakeupBucketCount>
+ mTimersFiredPerNotifiedWakeup MOZ_GUARDED_BY(mMonitor) = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ mutable size_t mTotalTimersAdded MOZ_GUARDED_BY(mMonitor) = 0;
+ mutable size_t mTotalTimersRemoved MOZ_GUARDED_BY(mMonitor) = 0;
+ mutable size_t mTotalTimersFiredNotified MOZ_GUARDED_BY(mMonitor) = 0;
+ mutable size_t mTotalTimersFiredUnnotified MOZ_GUARDED_BY(mMonitor) = 0;
+
+ mutable size_t mTotalWakeupCount MOZ_GUARDED_BY(mMonitor) = 0;
+ mutable size_t mTotalUnnotifiedWakeupCount MOZ_GUARDED_BY(mMonitor) = 0;
+ mutable size_t mTotalNotifiedWakeupCount MOZ_GUARDED_BY(mMonitor) = 0;
+
+ mutable double mTotalActualTimerFiringDelayNotified MOZ_GUARDED_BY(mMonitor) =
+ 0.0;
+ mutable double mTotalActualTimerFiringDelayUnnotified
+ MOZ_GUARDED_BY(mMonitor) = 0.0;
+
+ mutable TimeStamp mFirstTimerAdded MOZ_GUARDED_BY(mMonitor);
+
+ mutable size_t mEarlyWakeups MOZ_GUARDED_BY(mMonitor) = 0;
+ mutable double mTotalEarlyWakeupTime MOZ_GUARDED_BY(mMonitor) = 0.0;
+
+ void PrintStatistics() const;
+#endif
+};
+
+#endif /* TimerThread_h___ */
diff --git a/xpcom/threads/VsyncTaskManager.cpp b/xpcom/threads/VsyncTaskManager.cpp
new file mode 100644
index 0000000000..ba4201af45
--- /dev/null
+++ b/xpcom/threads/VsyncTaskManager.cpp
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VsyncTaskManager.h"
+#include "InputTaskManager.h"
+
+namespace mozilla {
+
+StaticRefPtr<VsyncTaskManager> VsyncTaskManager::gHighPriorityTaskManager;
+
+void VsyncTaskManager::Init() {
+ gHighPriorityTaskManager = new VsyncTaskManager();
+}
+
+void VsyncTaskManager::WillRunTask() {
+ TaskManager::WillRunTask();
+ InputTaskManager::Get()->NotifyVsync();
+};
+} // namespace mozilla
diff --git a/xpcom/threads/VsyncTaskManager.h b/xpcom/threads/VsyncTaskManager.h
new file mode 100644
index 0000000000..e284ebf47b
--- /dev/null
+++ b/xpcom/threads/VsyncTaskManager.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_VsyncTaskManager_h
+#define mozilla_VsyncTaskManager_h
+
+#include "TaskController.h"
+#include "mozilla/StaticPtr.h"
+
+namespace mozilla {
+class VsyncTaskManager : public TaskManager {
+ public:
+ static VsyncTaskManager* Get() { return gHighPriorityTaskManager.get(); }
+ static void Cleanup() { gHighPriorityTaskManager = nullptr; }
+ static void Init();
+
+ void WillRunTask() override;
+
+ private:
+ static StaticRefPtr<VsyncTaskManager> gHighPriorityTaskManager;
+};
+} // namespace mozilla
+#endif
diff --git a/xpcom/threads/WinHandleWatcher.cpp b/xpcom/threads/WinHandleWatcher.cpp
new file mode 100644
index 0000000000..07d49e3730
--- /dev/null
+++ b/xpcom/threads/WinHandleWatcher.cpp
@@ -0,0 +1,303 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <windows.h>
+#include <threadpoolapiset.h>
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Logging.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/ThreadSafety.h"
+#include "mozilla/WinHandleWatcher.h"
+
+#include "nsCOMPtr.h"
+#include "nsIRunnable.h"
+#include "nsISerialEventTarget.h"
+#include "nsISupportsImpl.h"
+#include "nsITargetShutdownTask.h"
+#include "nsIWeakReferenceUtils.h"
+#include "nsThreadUtils.h"
+
+mozilla::LazyLogModule sHWLog("HandleWatcher");
+
+namespace mozilla {
+namespace details {
+struct WaitHandleDeleter {
+ void operator()(PTP_WAIT waitHandle) {
+ MOZ_LOG(sHWLog, LogLevel::Debug, ("Closing PTP_WAIT %p", waitHandle));
+ ::CloseThreadpoolWait(waitHandle);
+ }
+};
+} // namespace details
+using WaitHandlePtr = UniquePtr<TP_WAIT, details::WaitHandleDeleter>;
+
+// HandleWatcher::Impl
+//
+// The backing implementation of HandleWatcher is a PTP_WAIT, an OS-threadpool
+// wait-object. Windows doesn't actually create a new thread per wait-object;
+// OS-threadpool threads are assigned to wait-objects only when their associated
+// handles become signaled -- although explicit documentation of this fact is
+// somewhat obscurely placed. [1]
+//
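+// For orientation, the wait-object lifecycle this class drives is roughly the
+// following sketch; Watch() and FlushWaitHandle() below hold the real call
+// sites:
+//
+// PTP_WAIT w = ::CreateThreadpoolWait(&WaitCallback, ctx, nullptr);
+// ::SetThreadpoolWait(w, handle, nullptr); // arm: watch `handle`
+// ::SetThreadpoolWait(w, nullptr, nullptr); // disarm
+// ::WaitForThreadpoolWaitCallbacks(w, TRUE); // flush in-flight callbacks
+// ::CloseThreadpoolWait(w); // via WaitHandleDeleter
+//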
+// Throughout this class, we use manual locking and unlocking guarded by Clang's
+// thread-safety warnings, rather than scope-based lock-guards. See `Replace()`
+// for an explanation and justification.
+//
+// [1]https://learn.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitformultipleobjects#remarks
+class HandleWatcher::Impl final : public nsITargetShutdownTask {
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ public:
+ Impl() = default;
+
+ private:
+ ~Impl() { MOZ_ASSERT(IsStopped()); }
+
+ struct Data {
+ // The watched handle and its callback.
+ HANDLE handle;
+ RefPtr<nsIEventTarget> target;
+ nsCOMPtr<nsIRunnable> runnable;
+
+ // Handle to the threadpool wait-object.
+ WaitHandlePtr waitHandle;
+ // A pointer to ourselves, notionally owned by the wait-object.
+ RefPtr<Impl> self;
+
+ // (We can't actually do this, because (a) it has annoying consequences in
+ // C++20 thanks to P1008R1, and (b) Clang just ignores it anyway.)
+ //
+ // ~Data() MOZ_EXCLUDES(mMutex) = default;
+ };
+
+ mozilla::Mutex mMutex{"HandleWatcher::Impl"};
+ Data mData MOZ_GUARDED_BY(mMutex) = {};
+
+ // Callback from OS threadpool wait-object.
+ static void CALLBACK WaitCallback(PTP_CALLBACK_INSTANCE, void* ctx,
+ PTP_WAIT aWaitHandle,
+ TP_WAIT_RESULT aResult) {
+ static_cast<Impl*>(ctx)->OnWaitCompleted(aWaitHandle, aResult);
+ }
+
+ void OnWaitCompleted(PTP_WAIT aWaitHandle, TP_WAIT_RESULT aResult)
+ MOZ_EXCLUDES(mMutex) {
+ MOZ_ASSERT(aResult == WAIT_OBJECT_0);
+
+ mMutex.Lock();
+ // If this callback is no longer the active callback, skip out.
+ // All cleanup is someone else's problem.
+ if (aWaitHandle != mData.waitHandle.get()) {
+ MOZ_LOG(sHWLog, LogLevel::Debug,
+ ("Recv'd already-stopped callback: HW %p | PTP_WAIT %p", this,
+ aWaitHandle));
+ mMutex.Unlock();
+ return;
+ }
+
+ // Take our self-pointer so that we release it on exit.
+ RefPtr<Impl> self = std::move(mData.self);
+
+ MOZ_LOG(sHWLog, LogLevel::Info,
+ ("Recv'd callback: HW %p | handle %p | target %p | PTP_WAIT %p",
+ this, mData.handle, mData.target.get(), aWaitHandle));
+
+ // This may fail if (for example) `mData.target` is being shut down, but we
+ // have not yet received the shutdown callback.
+ mData.target->Dispatch(mData.runnable.forget());
+ Replace(Data{});
+ }
+
+ public:
+ static RefPtr<Impl> Create(HANDLE aHandle, nsIEventTarget* aTarget,
+ already_AddRefed<nsIRunnable> aRunnable) {
+ auto impl = MakeRefPtr<Impl>();
+ bool const ok [[maybe_unused]] =
+ impl->Watch(aHandle, aTarget, std::move(aRunnable));
+ MOZ_ASSERT(ok);
+ return impl;
+ }
+
+ private:
+ bool Watch(HANDLE aHandle, nsIEventTarget* aTarget,
+ already_AddRefed<nsIRunnable> aRunnable) MOZ_EXCLUDES(mMutex) {
+ MOZ_ASSERT(aHandle);
+ MOZ_ASSERT(aTarget);
+
+ RefPtr<nsIEventTarget> target(aTarget);
+
+ WaitHandlePtr waitHandle{
+ ::CreateThreadpoolWait(&WaitCallback, this, nullptr)};
+ if (!waitHandle) {
+ return false;
+ }
+
+ {
+ mMutex.Lock();
+
+ nsresult const ret = aTarget->RegisterShutdownTask(this);
+ if (NS_FAILED(ret)) {
+ mMutex.Unlock();
+ return false;
+ }
+
+ MOZ_LOG(sHWLog, LogLevel::Info,
+ ("Setting callback: HW %p | handle %p | target %p | PTP_WAIT %p",
+ this, aHandle, aTarget, waitHandle.get()));
+
+ // returns `void`; presumably always succeeds given a successful
+ // `::CreateThreadpoolWait()`
+ ::SetThreadpoolWait(waitHandle.get(), aHandle, nullptr);
+ // After this point, you must call `FlushWaitHandle(waitHandle.get())`
+ // before destroying the wait handle. (Note that this must be done while
+ // *not* holding `mMutex`!)
+
+ Replace(Data{.handle = aHandle,
+ .target = std::move(target),
+ .runnable = aRunnable,
+ .waitHandle = std::move(waitHandle),
+ .self = this});
+ }
+
+ return true;
+ }
+
+ void TargetShutdown() MOZ_EXCLUDES(mMutex) override final {
+ mMutex.Lock();
+
+ MOZ_LOG(sHWLog, LogLevel::Debug,
+ ("Target shutdown: HW %p | handle %p | target %p | PTP_WAIT %p",
+ this, mData.handle, mData.target.get(), mData.waitHandle.get()));
+
+ // Clear mData.target, since there's no need to unregister the shutdown task
+ // anymore. Hold onto it until we release the mutex, though, to avoid any
+ // reentrancy issues.
+ //
+ // This is more for internal consistency than safety: someone has to be
+ // shutting `target` down, and that someone isn't us, so there's necessarily
+ // another reference out there. (Although decrementing the refcount might
+ // still have arbitrary effects if someone's been excessively clever with
+ // nsISupports::Release...)
+ auto const oldTarget = std::move(mData.target);
+ Replace(Data{});
+ // (Static-assert that the mutex has indeed been released.)
+ ([&]() MOZ_EXCLUDES(mMutex) {})();
+ }
+
+ public:
+ void Stop() MOZ_EXCLUDES(mMutex) {
+ mMutex.Lock();
+ Replace(Data{});
+ }
+
+ bool IsStopped() MOZ_EXCLUDES(mMutex) {
+ mozilla::MutexAutoLock lock(mMutex);
+ return !mData.handle;
+ }
+
+ private:
+ // Throughout this class, we use manual locking and unlocking guarded by
+ // Clang's thread-safety warnings, rather than scope-based lock-guards. This
+ // is largely driven by `Replace()`, below, which performs both operations
+ // which require the mutex to be held and operations which require it to not
+ // be held, and therefore must explicitly sequence the mutex release.
+ //
+ // These explicit locks, unlocks, and annotations are both alien to C++ and
+ // offensively tedious; but they _are_ still checked for state consistency at
+ // scope boundaries. (The concerned reader is invited to test this by
+ // deliberately removing an `mMutex.Unlock()` call from anywhere in the class
+ // and viewing the resultant compiler diagnostics.)
+ //
+ // A more principled, or at least differently-principled, implementation might
+ // create a scope-based lock-guard and pass it to `Replace()` to dispose of at
+ // the proper time. Alas, it cannot be communicated to Clang's thread-safety
+ // checker that such a guard is associated with `mMutex`.
+ //
+ void Replace(Data&& aData) MOZ_CAPABILITY_RELEASE(mMutex) {
+ // either both handles are NULL, or neither is
+ MOZ_ASSERT(!!aData.handle == !!aData.waitHandle);
+
+ if (mData.handle) {
+ MOZ_LOG(sHWLog, LogLevel::Info,
+ ("Stop callback: HW %p | handle %p | target %p | PTP_WAIT %p",
+ this, mData.handle, mData.target.get(), mData.waitHandle.get()));
+ }
+
+ if (mData.target) {
+ mData.target->UnregisterShutdownTask(this);
+ }
+
+ // Extract the old data and insert the new -- but hold onto the old data for
+ // now. (See [1] and [2], below.)
+ Data oldData = std::exchange(mData, std::move(aData));
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Release the mutex.
+ mMutex.Unlock();
+ ////////////////////////////////////////////////////////////////////////////
+
+ // [1] `oldData.self` will be unset if the old callback already ran (or if
+ // there was no old callback in the first place). If it's set, though, we
+ // need to explicitly clear out the wait-object first.
+ if (oldData.self) {
+ MOZ_ASSERT(oldData.waitHandle);
+ FlushWaitHandle(oldData.waitHandle.get());
+ }
+
+ // [2] oldData also includes several other reference-counted pointers. It's
+ // possible that these may be the last pointer to something, so releasing
+ // them may have arbitrary side-effects -- like calling this->Stop(), which
+ // will try to reacquire the mutex.
+ //
+ // Now that we've released the mutex, we can (implicitly) release them all
+ // here.
+ }
+
+ // Either confirm as complete or cancel any callbacks on aWaitHandle. Block
+ // until this is done. (See documentation for ::CloseThreadpoolWait().)
+ void FlushWaitHandle(PTP_WAIT aWaitHandle) MOZ_EXCLUDES(mMutex) {
+ ::SetThreadpoolWait(aWaitHandle, nullptr, nullptr);
+ // This might block on `OnWaitCompleted()`, so we can't hold `mMutex` here.
+ ::WaitForThreadpoolWaitCallbacks(aWaitHandle, TRUE);
+ // ::CloseThreadpoolWait() itself is the caller's responsibility.
+ }
+};
+
+NS_IMPL_ISUPPORTS(HandleWatcher::Impl, nsITargetShutdownTask)
+
+//////
+// HandleWatcher member function implementations
+
+HandleWatcher::HandleWatcher() : mImpl{} {}
+HandleWatcher::~HandleWatcher() {
+ if (mImpl) {
+ MOZ_ASSERT(mImpl->IsStopped());
+ mImpl->Stop(); // just in case, in release
+ }
+}
+
+void HandleWatcher::Watch(HANDLE aHandle, nsIEventTarget* aTarget,
+ already_AddRefed<nsIRunnable> aRunnable) {
+ auto impl = Impl::Create(aHandle, aTarget, std::move(aRunnable));
+ MOZ_ASSERT(impl);
+
+ if (mImpl) {
+ mImpl->Stop();
+ }
+ mImpl = std::move(impl);
+}
+
+void HandleWatcher::Stop() {
+ if (mImpl) {
+ mImpl->Stop();
+ }
+}
+
+bool HandleWatcher::IsStopped() { return !mImpl || mImpl->IsStopped(); }
+
+} // namespace mozilla
diff --git a/xpcom/threads/WinHandleWatcher.h b/xpcom/threads/WinHandleWatcher.h
new file mode 100644
index 0000000000..3e9b4f1f22
--- /dev/null
+++ b/xpcom/threads/WinHandleWatcher.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WinHandleWatcher_h__
+#define WinHandleWatcher_h__
+
+#include <minwindef.h>
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+
+#include "nsIEventTarget.h"
+#include "nsIRunnable.h"
+#include "nsIThread.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+///////////////////////////////////////////////////////////////////////
+// HandleWatcher
+//
+// Enqueues a task onto an event target when a watched Win32 synchronization
+// object [1] enters the signaled state.
+//
+// The HandleWatcher must be stopped before either it or the synchronization
+// object is destroyed.
+//
+//////
+//
+// Example of use:
+//
+// ```
+// class MyClass {
+// /* ... */
+//
+// HANDLE CreateThing();
+// void OnComplete();
+// public:
+// void Fire() {
+// mHandle.set(CreateThing());
+// mWatcher.Watch(
+// mHandle.get(), NS_GetCurrentThread(), // (or any other thread)
+// NS_NewRunnableFunction("OnComplete", [this] { OnComplete(); }));
+// }
+//
+// ~MyClass() { mWatcher.Stop(); }
+//
+// HandleWatcher mWatcher;
+// HandlePtr mHandle; // calls ::CloseHandle() on destruction
+// };
+// ```
+//
+// Note: this example demonstrates why an explicit `Stop()` is necessary in
+// MyClass's destructor. Without it, the `HandlePtr` would destroy the HANDLE --
+// and possibly whatever other data `OnComplete()` depends on -- before the
+// watch was stopped!
+//
+// Rather than make code correctness silently dependent on member object order,
+// we require that HandleWatcher already be stopped at its destruction time.
+// (This does not guarantee correctness, as the task may still reference a
+// partially-destroyed transitive owner; but, short of RIIR, a guarantee of
+// correctness is probably not possible here.)
+//
+//////
+//
+// [1]https://learn.microsoft.com/en-us/windows/win32/sync/synchronization-objects
+class HandleWatcher {
+ public:
+ class Impl;
+
+ HandleWatcher();
+ ~HandleWatcher();
+
+ HandleWatcher(HandleWatcher const&) = delete;
+ HandleWatcher& operator=(HandleWatcher const&) = delete;
+
+ HandleWatcher(HandleWatcher&&) = default;
+ HandleWatcher& operator=(HandleWatcher&&) = default;
+
+ // Watches the given Win32 HANDLE, which must be a synchronization object. As
+ // soon as the HANDLE is signaled, posts `aRunnable` to `aTarget`.
+ //
+ // `aHandle` is merely borrowed for the duration of the watch: the
+ // HandleWatcher does not attempt to close it, and its lifetime must exceed
+ // that of the watch.
+ //
+ // If the watch is stopped for any reason other than completion, `aRunnable`
+ // is released immediately, on the same thread from which the Watch was
+ // stopped.
+ //
+ // The watch is stopped when any of the following occurs:
+ // * `Stop()` is called.
+ // * `Watch()` is called again, even without an intervening `Stop()`.
+ // * This object is destroyed.
+ // * `aTarget` shuts down.
+ // * `aHandle` becomes signaled.
+ //
+ void Watch(HANDLE aHandle, nsIEventTarget* aTarget,
+ already_AddRefed<nsIRunnable> aRunnable);
+
+ // Cancels the current watch, if any.
+ //
+ // Idempotent. Thread-safe with respect to other calls of `Stop()`.
+ void Stop();
+
+ // Potentially racy. Only intended for tests.
+ bool IsStopped();
+
+ private:
+ RefPtr<Impl> mImpl;
+};
+} // namespace mozilla
+
+#endif // WinHandleWatcher_h__
diff --git a/xpcom/threads/components.conf b/xpcom/threads/components.conf
new file mode 100644
index 0000000000..53f76d3b89
--- /dev/null
+++ b/xpcom/threads/components.conf
@@ -0,0 +1,29 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Classes = [
+ {
+ 'cid': '{03d68f92-9513-4e25-9be9-7cb239874172}',
+ 'contract_ids': ['@mozilla.org/process/environment;1'],
+ 'legacy_constructor': 'nsEnvironment::Create',
+ 'headers': ['/xpcom/threads/nsEnvironment.h'],
+ 'js_name': 'env',
+ 'interfaces': ['nsIEnvironment'],
+ },
+ {
+ 'cid': '{5ff24248-1dd2-11b2-8427-fbab44f29bc8}',
+ 'contract_ids': ['@mozilla.org/timer;1'],
+ 'legacy_constructor': 'nsTimer::XPCOMConstructor',
+ 'headers': ['/xpcom/threads/nsTimerImpl.h'],
+ 'processes': ProcessSelector.ALLOW_IN_GPU_RDD_SOCKET_AND_UTILITY_PROCESS,
+ },
+ {
+ 'cid': '{d39a8904-2e09-4a3a-a273-c3bec7db2bfe}',
+ 'contract_ids': ['@mozilla.org/timer-manager;1'],
+ 'headers': ['/xpcom/threads/nsTimerImpl.h'],
+ 'type': 'nsTimerManager',
+ },
+]
diff --git a/xpcom/threads/moz.build b/xpcom/threads/moz.build
new file mode 100644
index 0000000000..efdbb47304
--- /dev/null
+++ b/xpcom/threads/moz.build
@@ -0,0 +1,148 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+XPIDL_SOURCES += [
+ "nsIDirectTaskDispatcher.idl",
+ "nsIEnvironment.idl",
+ "nsIEventTarget.idl",
+ "nsIIdlePeriod.idl",
+ "nsINamed.idl",
+ "nsIProcess.idl",
+ "nsIRunnable.idl",
+ "nsISerialEventTarget.idl",
+ "nsISupportsPriority.idl",
+ "nsIThread.idl",
+ "nsIThreadInternal.idl",
+ "nsIThreadManager.idl",
+ "nsIThreadPool.idl",
+ "nsIThreadShutdown.idl",
+ "nsITimer.idl",
+]
+
+XPIDL_MODULE = "xpcom_threads"
+
+XPCOM_MANIFESTS += [
+ "components.conf",
+]
+
+EXPORTS += [
+ "MainThreadUtils.h",
+ "nsICancelableRunnable.h",
+ "nsIDiscardableRunnable.h",
+ "nsIIdleRunnable.h",
+ "nsITargetShutdownTask.h",
+ "nsMemoryPressure.h",
+ "nsProcess.h",
+ "nsProxyRelease.h",
+ "nsThread.h",
+ "nsThreadManager.h",
+ "nsThreadPool.h",
+ "nsThreadUtils.h",
+]
+
+EXPORTS.mozilla += [
+ "AbstractThread.h",
+ "BlockingResourceBase.h",
+ "CondVar.h",
+ "CPUUsageWatcher.h",
+ "DataMutex.h",
+ "DeadlockDetector.h",
+ "DelayedRunnable.h",
+ "EventQueue.h",
+ "EventTargetCapability.h",
+ "IdlePeriodState.h",
+ "IdleTaskRunner.h",
+ "InputTaskManager.h",
+ "LazyIdleThread.h",
+ "MainThreadIdlePeriod.h",
+ "Monitor.h",
+ "MozPromise.h",
+ "MozPromiseInlines.h",
+ "Mutex.h",
+ "PerformanceCounter.h",
+ "Queue.h",
+ "RecursiveMutex.h",
+ "ReentrantMonitor.h",
+ "RWLock.h",
+ "SchedulerGroup.h",
+ "SharedThreadPool.h",
+ "SpinEventLoopUntil.h",
+ "StateMirroring.h",
+ "StateWatching.h",
+ "SynchronizedEventQueue.h",
+ "SyncRunnable.h",
+ "TaskCategory.h",
+ "TaskController.h",
+ "TaskDispatcher.h",
+ "TaskQueue.h",
+ "ThreadBound.h",
+ "ThreadEventQueue.h",
+ "ThrottledEventQueue.h",
+ "VsyncTaskManager.h",
+]
+
+SOURCES += [
+ "IdleTaskRunner.cpp",
+ "ThreadDelay.cpp",
+]
+
+UNIFIED_SOURCES += [
+ "AbstractThread.cpp",
+ "BlockingResourceBase.cpp",
+ "CPUUsageWatcher.cpp",
+ "DelayedRunnable.cpp",
+ "EventQueue.cpp",
+ "IdlePeriodState.cpp",
+ "InputTaskManager.cpp",
+ "LazyIdleThread.cpp",
+ "MainThreadIdlePeriod.cpp",
+ "nsEnvironment.cpp",
+ "nsMemoryPressure.cpp",
+ "nsProcessCommon.cpp",
+ "nsProxyRelease.cpp",
+ "nsThread.cpp",
+ "nsThreadManager.cpp",
+ "nsThreadPool.cpp",
+ "nsThreadUtils.cpp",
+ "nsTimerImpl.cpp",
+ "PerformanceCounter.cpp",
+ "RecursiveMutex.cpp",
+ "RWLock.cpp",
+ "SchedulerGroup.cpp",
+ "SharedThreadPool.cpp",
+ "SynchronizedEventQueue.cpp",
+ "TaskController.cpp",
+ "TaskQueue.cpp",
+ "ThreadEventQueue.cpp",
+ "ThreadEventTarget.cpp",
+ "ThreadLocalVariables.cpp",
+ "ThrottledEventQueue.cpp",
+ "TimerThread.cpp",
+ "VsyncTaskManager.cpp",
+]
+
+if CONFIG["OS_ARCH"] == "WINNT":
+ EXPORTS.mozilla += ["WinHandleWatcher.h"]
+ UNIFIED_SOURCES += ["WinHandleWatcher.cpp"]
+
+# Should match the conditions in toolkit/components/backgroundhangmonitor/moz.build
+if (
+ CONFIG["NIGHTLY_BUILD"]
+ and not CONFIG["MOZ_DEBUG"]
+ and not CONFIG["MOZ_TSAN"]
+ and not CONFIG["MOZ_ASAN"]
+):
+ DEFINES["MOZ_ENABLE_BACKGROUND_HANG_MONITOR"] = 1
+
+LOCAL_INCLUDES += [
+ "../build",
+ "/caps",
+ "/tools/profiler",
+]
+
+FINAL_LIBRARY = "xul"
+
+include("/ipc/chromium/chromium-config.mozbuild")
diff --git a/xpcom/threads/nsEnvironment.cpp b/xpcom/threads/nsEnvironment.cpp
new file mode 100644
index 0000000000..54efd9194a
--- /dev/null
+++ b/xpcom/threads/nsEnvironment.cpp
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsEnvironment.h"
+#include "prenv.h"
+#include "nsBaseHashtable.h"
+#include "nsHashKeys.h"
+#include "nsPromiseFlatString.h"
+#include "nsDependentString.h"
+#include "nsNativeCharsetUtils.h"
+#include "mozilla/Printf.h"
+#include "mozilla/StaticMutex.h"
+
+using namespace mozilla;
+
+NS_IMPL_ISUPPORTS(nsEnvironment, nsIEnvironment)
+
+nsresult nsEnvironment::Create(REFNSIID aIID, void** aResult) {
+ nsresult rv;
+ *aResult = nullptr;
+
+ nsEnvironment* obj = new nsEnvironment();
+
+ rv = obj->QueryInterface(aIID, aResult);
+ if (NS_FAILED(rv)) {
+ delete obj;
+ }
+ return rv;
+}
+
+NS_IMETHODIMP
+nsEnvironment::Exists(const nsAString& aName, bool* aOutValue) {
+ nsAutoCString nativeName;
+ nsresult rv = NS_CopyUnicodeToNative(aName, nativeName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsAutoCString nativeVal;
+#if defined(XP_UNIX)
+ /* For Unix/Linux platforms we follow the Unix definition:
+ * An environment variable exists when |getenv()| returns a non-nullptr
+ * value. An environment variable does not exist when |getenv()| returns
+ * nullptr.
+ */
+ const char* value = PR_GetEnv(nativeName.get());
+ *aOutValue = value && *value;
+#else
+ /* For non-Unix/Linux platforms we have to fall back to a
+ * "portable" definition (which is incorrect for Unix/Linux!!!!)
+ * which simply checks whether the string returned by |Get()| is empty
+ * or not.
+ */
+ nsAutoString value;
+ Get(aName, value);
+ *aOutValue = !value.IsEmpty();
+#endif /* XP_UNIX */
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsEnvironment::Get(const nsAString& aName, nsAString& aOutValue) {
+ nsAutoCString nativeName;
+ nsresult rv = NS_CopyUnicodeToNative(aName, nativeName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsAutoCString nativeVal;
+ const char* value = PR_GetEnv(nativeName.get());
+ if (value && *value) {
+ rv = NS_CopyNativeToUnicode(nsDependentCString(value), aOutValue);
+ } else {
+ aOutValue.Truncate();
+ rv = NS_OK;
+ }
+
+ return rv;
+}
+
+/* Environment strings must have static duration; we're going to leak all of
+ * this at shutdown. This is by design, a consequence of how Unix/Linux
+ * implement environment variables.
+ */
+
+typedef nsBaseHashtableET<nsCharPtrHashKey, char*> EnvEntryType;
+typedef nsTHashtable<EnvEntryType> EnvHashType;
+
+static StaticMutex gEnvHashMutex;
+static EnvHashType* gEnvHash MOZ_GUARDED_BY(gEnvHashMutex) = nullptr;
+
+static EnvHashType* EnsureEnvHash() MOZ_REQUIRES(gEnvHashMutex) {
+ if (!gEnvHash) {
+ gEnvHash = new EnvHashType;
+ }
+ return gEnvHash;
+}
+
+NS_IMETHODIMP
+nsEnvironment::Set(const nsAString& aName, const nsAString& aValue) {
+ nsAutoCString nativeName;
+ nsAutoCString nativeVal;
+
+ nsresult rv = NS_CopyUnicodeToNative(aName, nativeName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = NS_CopyUnicodeToNative(aValue, nativeVal);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ StaticMutexAutoLock lock(gEnvHashMutex);
+ EnvEntryType* entry = EnsureEnvHash()->PutEntry(nativeName.get());
+ if (!entry) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ SmprintfPointer newData =
+ mozilla::Smprintf("%s=%s", nativeName.get(), nativeVal.get());
+ if (!newData) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ PR_SetEnv(newData.get());
+ if (entry->GetData()) {
+ free(entry->GetData());
+ }
+ entry->SetData(newData.release());
+ return NS_OK;
+}
diff --git a/xpcom/threads/nsEnvironment.h b/xpcom/threads/nsEnvironment.h
new file mode 100644
index 0000000000..d371050ec5
--- /dev/null
+++ b/xpcom/threads/nsEnvironment.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsEnvironment_h__
+#define nsEnvironment_h__
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+#include "nsIEnvironment.h"
+
+#define NS_ENVIRONMENT_CID \
+ { \
+ 0X3D68F92UL, 0X9513, 0X4E25, { \
+ 0X9B, 0XE9, 0X7C, 0XB2, 0X39, 0X87, 0X41, 0X72 \
+ } \
+ }
+#define NS_ENVIRONMENT_CONTRACTID "@mozilla.org/process/environment;1"
+
+class nsEnvironment final : public nsIEnvironment {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIENVIRONMENT
+
+ static nsresult Create(REFNSIID aIID, void** aResult);
+
+ private:
+ nsEnvironment() {}
+ ~nsEnvironment() = default;
+};
+
+#endif /* !nsEnvironment_h__ */
diff --git a/xpcom/threads/nsICancelableRunnable.h b/xpcom/threads/nsICancelableRunnable.h
new file mode 100644
index 0000000000..7aa98c86b6
--- /dev/null
+++ b/xpcom/threads/nsICancelableRunnable.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsICancelableRunnable_h__
+#define nsICancelableRunnable_h__
+
+#include "nsISupports.h"
+
+#define NS_ICANCELABLERUNNABLE_IID \
+ { \
+ 0xde93dc4c, 0x5eea, 0x4eb7, { \
+ 0xb6, 0xd1, 0xdb, 0xf1, 0xe0, 0xce, 0xf6, 0x5c \
+ } \
+ }
+
+class nsICancelableRunnable : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_ICANCELABLERUNNABLE_IID)
+
+ /*
+ * Cancels a pending task, so that calling run() on the task is a no-op.
+ * Calling cancel after the task execution has begun will be a no-op.
+ * Calling this method twice is considered an error.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the runnable has already been canceled.
+ */
+ virtual nsresult Cancel() = 0;
+
+ protected:
+ nsICancelableRunnable() = default;
+ virtual ~nsICancelableRunnable() = default;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsICancelableRunnable, NS_ICANCELABLERUNNABLE_IID)
+
+#endif // nsICancelableRunnable_h__
diff --git a/xpcom/threads/nsIDirectTaskDispatcher.idl b/xpcom/threads/nsIDirectTaskDispatcher.idl
new file mode 100644
index 0000000000..7d44608708
--- /dev/null
+++ b/xpcom/threads/nsIDirectTaskDispatcher.idl
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIRunnable.idl"
+
+%{C++
+#include "mozilla/AlreadyAddRefed.h"
+%}
+
+native alreadyAddRefed_nsIRunnable(already_AddRefed<nsIRunnable>);
+
+/*
+ * The primary use of this interface is to allow any nsISerialEventTarget to
+ * provide Direct Task dispatching, which is similar (but not identical) to
+ * the microtask semantics of JS promises.
+ * New direct tasks may be dispatched while a current direct task is running,
+ * in which case they will be run in FIFO order.
+ */
+[uuid(e05bf0fe-94b7-4e28-8462-a8368da9c136)]
+interface nsIDirectTaskDispatcher : nsISupports
+{
+ /**
+ * Dispatch an event for the nsISerialEventTarget, using the direct task
+ * queue.
+ *
+ * This function must be called from the same nsISerialEventTarget
+ * implementing direct task dispatching.
+ *
+ * @param event
+ * The alreadyAddRefed<> event to dispatch.
+ *
+ */
+ [noscript] void dispatchDirectTask(in alreadyAddRefed_nsIRunnable event);
+
+ /**
+ * Synchronously run any pending direct tasks queued.
+ */
+ [noscript] void drainDirectTasks();
+
+ /**
+ * Returns true if any direct tasks are pending.
+ */
+ [noscript] bool haveDirectTasks();
+
+ %{C++
+ // Infallible version of the above. Will assert that it is successful.
+ bool HaveDirectTasks() {
+ bool value = false;
+ MOZ_ALWAYS_SUCCEEDS(HaveDirectTasks(&value));
+ return value;
+ }
+ %}
+
+};
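+
+/*
+ * Example C++ usage (a sketch; `target` is assumed to be an
+ * nsISerialEventTarget that implements nsIDirectTaskDispatcher):
+ *
+ * nsCOMPtr<nsIDirectTaskDispatcher> dtd = do_QueryInterface(target);
+ * if (dtd) {
+ * // Runs ahead of ordinary queued events:
+ * dtd->DispatchDirectTask(NS_NewRunnableFunction("Example", [] {}));
+ * if (dtd->HaveDirectTasks()) {
+ * dtd->DrainDirectTasks();
+ * }
+ * }
+ */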
diff --git a/xpcom/threads/nsIDiscardableRunnable.h b/xpcom/threads/nsIDiscardableRunnable.h
new file mode 100644
index 0000000000..873b1f5d93
--- /dev/null
+++ b/xpcom/threads/nsIDiscardableRunnable.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef XPCOM_THREADS_NSIDISCARDABLERUNNABLE_H_
+#define XPCOM_THREADS_NSIDISCARDABLERUNNABLE_H_
+
+#include "nsISupports.h"
+
+/**
+ * An interface implemented by nsIRunnable tasks for which nsIRunnable::Run()
+ * might not be called.
+ */
+#define NS_IDISCARDABLERUNNABLE_IID \
+ { \
+ 0xde93dc4c, 0x755c, 0x4cdc, { \
+ 0x96, 0x76, 0x35, 0xc6, 0x48, 0x81, 0x59, 0x78 \
+ } \
+ }
+
+class NS_NO_VTABLE nsIDiscardableRunnable : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IDISCARDABLERUNNABLE_IID)
+
+ /**
+ * Called exactly once on a queued task only if nsIRunnable::Run() is not
+ * called.
+ */
+ virtual void OnDiscard() = 0;
+
+ protected:
+ nsIDiscardableRunnable() = default;
+ virtual ~nsIDiscardableRunnable() = default;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIDiscardableRunnable,
+ NS_IDISCARDABLERUNNABLE_IID)
+
+#endif // XPCOM_THREADS_NSIDISCARDABLERUNNABLE_H_
diff --git a/xpcom/threads/nsIEnvironment.idl b/xpcom/threads/nsIEnvironment.idl
new file mode 100644
index 0000000000..60da8ba76a
--- /dev/null
+++ b/xpcom/threads/nsIEnvironment.idl
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * Scriptable access to the current process environment.
+ *
+ */
+[scriptable, uuid(101d5941-d820-4e85-a266-9a3469940807)]
+interface nsIEnvironment : nsISupports
+{
+ /**
+ * Set the value of an environment variable.
+ *
+ * @param aName the variable name to set.
+ * @param aValue the value to set.
+ */
+ void set(in AString aName, in AString aValue);
+
+ /**
+ * Get the value of an environment variable.
+ *
+ * @param aName the variable name to retrieve.
+ * @return returns the value of the env variable. An empty string
+ * will be returned when the env variable does not exist or
+ * when the value itself is an empty string - please use
+ * |exists()| to probe whether the env variable exists
+ * or not.
+ */
+ AString get(in AString aName);
+
+ /**
+ * Check the existence of an environment variable.
+ * This method checks whether an environment variable is present in
+ * the environment or not.
+ *
+ * - For Unix/Linux platforms we follow the Unix definition:
+ * An environment variable exists when |getenv()| returns a non-NULL value.
+ * An environment variable does not exist when |getenv()| returns NULL.
+ * - For non-Unix/Linux platforms we have to fall back to a
+ * "portable" definition (which is incorrect for Unix/Linux!!!!)
+ * which simply checks whether the string returned by |Get()| is empty
+ * or not.
+ *
+ * @param aName the variable name to probe.
+ * @return if the variable has been set, the value returned is
+ * PR_TRUE. If the variable was not defined in the
+ * environment PR_FALSE will be returned.
+ */
+ boolean exists(in AString aName);
+};
diff --git a/xpcom/threads/nsIEventTarget.idl b/xpcom/threads/nsIEventTarget.idl
new file mode 100644
index 0000000000..2139dc5ab3
--- /dev/null
+++ b/xpcom/threads/nsIEventTarget.idl
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsIRunnable.idl"
+%{C++
+#include "nsCOMPtr.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Atomics.h"
+
+class nsITargetShutdownTask;
+%}
+
+native alreadyAddRefed_nsIRunnable(already_AddRefed<nsIRunnable>);
+[ptr] native nsITargetShutdownTask(nsITargetShutdownTask);
+
+[builtinclass, scriptable, uuid(a03b8b63-af8b-4164-b0e5-c41e8b2b7cfa)]
+interface nsIEventTarget : nsISupports
+{
+ /* until we can get rid of all uses, keep the non-alreadyAddRefed<> version */
+%{C++
+ nsresult Dispatch(nsIRunnable* aEvent, uint32_t aFlags) {
+ return Dispatch(nsCOMPtr<nsIRunnable>(aEvent).forget(), aFlags);
+ }
+%}
+
+ /**
+ * This flag specifies the default mode of event dispatch, whereby the event
+ * is simply queued for later processing. When this flag is specified,
+ * dispatch returns immediately after the event is queued.
+ */
+ const unsigned long DISPATCH_NORMAL = 0;
+
+ // NOTE: 1 used to be DISPATCH_SYNC
+
+ /**
+ * This flag specifies that the dispatch is occurring from a running event
+ * that was dispatched to the same event target, and that event is about to
+ * finish.
+ *
+ * A thread pool can use this as an optimization hint to not spin up
+ * another thread, since the current thread is about to become idle.
+ *
+ * These events are always async.
+ */
+ const unsigned long DISPATCH_AT_END = 2;
+
+ /**
+ * This flag specifies that the dispatched event may block the thread on
+ * which it executes, usually by doing some sort of I/O. This information
+ * may be used by the event target to execute the job on a thread
+ * specifically dedicated to doing I/O, leaving other threads available for
+ * CPU-intensive work.
+ */
+ const unsigned long DISPATCH_EVENT_MAY_BLOCK = 4;
+
+ /**
+ * This flag specifies that the dispatched event should be delivered to the
+ * target thread even if the thread has been configured to block dispatching
+ * of runnables. This is generally done for threads which have their own
+ * internal event loop, such as thread pools or the timer thread, and will not
+ * service runnables dispatched to them until shutdown.
+ */
+ const unsigned long DISPATCH_IGNORE_BLOCK_DISPATCH = 8;
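+
+ /**
+ * (Illustrative C++ call, a sketch only: a caller whose runnable may do
+ * blocking I/O could write
+ * target->Dispatch(event.forget(), nsIEventTarget::DISPATCH_EVENT_MAY_BLOCK);
+ * where `event` is an nsCOMPtr<nsIRunnable>.)
+ */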
+
+ /**
+ * IsOnCurrentThread() should return true if events dispatched to this target
+ * can possibly run on the current thread, and false otherwise. In the case
+ * of an nsIEventTarget for a thread pool, it should return true on all
+ * threads in the pool. In the case of a non-thread nsIEventTarget such as
+ * ThrottledEventQueue, it should return true on the thread where events are
+ * expected to be processed, even if no events from the queue are actually
+ * being processed right now.
+ *
+ * When called on an nsISerialEventTarget, IsOnCurrentThread can be used to
+ * ensure that no other thread has "ownership" of the event target. As such,
+ * it's useful for asserting that an object is only used on a particular
+ * thread. IsOnCurrentThread can't guarantee that the current event has been
+ * dispatched through a particular event target.
+ *
+ * The infallible version of IsOnCurrentThread() is optimized to avoid a
+ * virtual call for non-thread event targets. Thread targets should set
+ * mThread to their virtual PRThread. Non-thread targets should leave
+ * mThread null and implement IsOnCurrentThreadInfallible() to
+ * return the correct answer.
+ *
+ * The fallible version of IsOnCurrentThread may return errors, such as during
+ * shutdown. If it does not return an error, it should return the same result
+ * as the infallible version. The infallible method should return the correct
+ * result regardless of whether the fallible method returns an error.
+ */
+ %{C++
+public:
+ // Infallible. Defined in nsThreadUtils.cpp. Delegates to
+ // IsOnCurrentThreadInfallible when mThread is null.
+ bool IsOnCurrentThread();
+
+protected:
+ mozilla::Atomic<PRThread*> mThread;
+
+ nsIEventTarget() : mThread(nullptr) {}
+ %}
+ // Note that this method is protected. We define it through IDL, rather than
+ // in a %{C++ block, to ensure that the correct method indices are recorded
+ // for XPConnect purposes.
+ [noscript,notxpcom] boolean isOnCurrentThreadInfallible();
+ %{C++
+public:
+ %}
+
+ // Fallible version of IsOnCurrentThread.
+ boolean isOnCurrentThread();
+
+ /**
+ * Dispatch an event to this event target. This function may be called from
+ * any thread, and it may be called re-entrantly.
+ *
+ * @param event
+ * The alreadyAddRefed<> event to dispatch.
+ * NOTE that the event will be leaked if it fails to dispatch.
+ * @param flags
+ * The flags modifying event dispatch. The flags are described in detail
+ * below.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched.
+ */
+ [noscript, binaryname(Dispatch)] void dispatchFromC(in alreadyAddRefed_nsIRunnable event,
+ [default(DISPATCH_NORMAL)] in unsigned long flags);
+ /**
+ * Version of Dispatch to expose to JS, which doesn't require an alreadyAddRefed<>
+ * (it will be converted to that internally)
+ *
+ * @param event
+ * The (raw) event to dispatch.
+ * @param flags
+ * The flags modifying event dispatch. The flags are described in detail
+ * below.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched.
+ */
+ [binaryname(DispatchFromScript)] void dispatch(in nsIRunnable event, in unsigned long flags);
+ /**
+ * Dispatch an event to this event target, but do not run it before delay
+ * milliseconds have passed. This function may be called from any thread.
+ *
+ * @param event
+ * The alreadyAddRefed<> event to dispatch.
+ * @param delay
+ * The delay (in ms) before running the event. If event does not rise to
+ * the top of the event queue before the delay has passed, it will be set
+ * aside to execute once the delay has passed. Otherwise, it will be
+ * executed immediately.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched, or
+ * that delay is zero.
+ */
+ [noscript] void delayedDispatch(in alreadyAddRefed_nsIRunnable event, in unsigned long delay);
+
+ /**
+ * Register a task to be run on this event target when it begins shutting
+ * down. Shutdown tasks may be run in any order, and this function may be
+ * called from any thread.
+ *
+ * The event target may or may not continue accepting events during or after
+ * the shutdown task. The precise behaviour here depends on the event target.
+ *
+ * @param task
+ * The task to be registered to the target thread.
+ * NOTE that unlike `dispatch`, this will not leak the task if it fails.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that task is null.
+ * @throws NS_ERROR_NOT_IMPLEMENTED
+ * Indicates that this event target doesn't support shutdown tasks.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is already shutting down, and no longer
+ * accepting events.
+ */
+ [noscript] void registerShutdownTask(in nsITargetShutdownTask task);
+
+ /**
+ * Unregisters a task previously registered with registerShutdownTask. This
+ * function may be called from any thread.
+ *
+ * @param task
+ * The task previously registered with registerShutdownTask
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that task is null.
+ * @throws NS_ERROR_NOT_IMPLEMENTED
+ * Indicates that this event target doesn't support shutdown tasks.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is already shutting down, and no longer
+ * accepting events, or that the shutdown task cannot be found.
+ */
+ [noscript] void unregisterShutdownTask(in nsITargetShutdownTask task);
+};
+
+%{C++
+// convenient aliases:
+#define NS_DISPATCH_NORMAL nsIEventTarget::DISPATCH_NORMAL
+#define NS_DISPATCH_AT_END nsIEventTarget::DISPATCH_AT_END
+#define NS_DISPATCH_EVENT_MAY_BLOCK nsIEventTarget::DISPATCH_EVENT_MAY_BLOCK
+#define NS_DISPATCH_IGNORE_BLOCK_DISPATCH nsIEventTarget::DISPATCH_IGNORE_BLOCK_DISPATCH
+
+// Convenient NS_DECL variant that includes some C++-only methods.
+#define NS_DECL_NSIEVENTTARGET_FULL \
+ NS_DECL_NSIEVENTTARGET \
+ /* Avoid hiding these methods */ \
+ using nsIEventTarget::Dispatch; \
+ using nsIEventTarget::IsOnCurrentThread;
+%}
diff --git a/xpcom/threads/nsIIdlePeriod.idl b/xpcom/threads/nsIIdlePeriod.idl
new file mode 100644
index 0000000000..03ab45d80d
--- /dev/null
+++ b/xpcom/threads/nsIIdlePeriod.idl
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+%{C++
+namespace mozilla {
+class TimeStamp;
+}
+%}
+
+native TimeStamp(mozilla::TimeStamp);
+
+/**
+ * An instance implementing nsIIdlePeriod is used by an associated
+ * nsIThread to estimate when it is likely that it will receive an
+ * event.
+ */
+[uuid(21dd35a2-eae9-4bd8-b470-0dfa35a0e3b9)]
+interface nsIIdlePeriod : nsISupports
+{
+ /**
+ * Return an estimate of a point in time in the future when we
+ * think that the associated thread will become busy. Should
+ * return TimeStamp() (i.e. the null time) or a time less than
+ * TimeStamp::Now() if the thread is currently busy or will become
+ * busy very soon.
+ */
+ TimeStamp getIdlePeriodHint();
+};
diff --git a/xpcom/threads/nsIIdleRunnable.h b/xpcom/threads/nsIIdleRunnable.h
new file mode 100644
index 0000000000..7fe6149154
--- /dev/null
+++ b/xpcom/threads/nsIIdleRunnable.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsIIdleRunnable_h__
+#define nsIIdleRunnable_h__
+
+#include "nsISupports.h"
+#include "mozilla/TimeStamp.h"
+
+#define NS_IIDLERUNNABLE_IID \
+ { \
+ 0x688be92e, 0x7ade, 0x4fdc, { \
+ 0x9d, 0x83, 0x74, 0xcb, 0xef, 0xf4, 0xa5, 0x2c \
+ } \
+ }
+
+class nsIEventTarget;
+
+/**
+ * A task interface for tasks that can schedule their work to happen
+ * in increments bounded by a deadline.
+ */
+class nsIIdleRunnable : public nsISupports {
+ public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IIDLERUNNABLE_IID)
+
+ /**
+ * Notify the task of a point in time in the future when the task
+ * should stop executing.
+ */
+  virtual void SetDeadline(mozilla::TimeStamp aDeadline) {}
+  virtual void SetTimer(uint32_t aDelay, nsIEventTarget* aTarget) {
+    MOZ_ASSERT_UNREACHABLE(
+        "The nsIIdleRunnable instance does not support "
+        "idle dispatch with timeout!");
+  }
+
+ protected:
+ nsIIdleRunnable() = default;
+ virtual ~nsIIdleRunnable() = default;
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIIdleRunnable, NS_IIDLERUNNABLE_IID)
+
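+// A minimal sketch (illustrative, assuming mozilla::Runnable from
+// nsThreadUtils.h; HasWork() and DoSomeWork() are hypothetical helpers):
+// an idle task that performs bounded units of work and yields once the
+// deadline passes.
+//
+// class ExampleIdleTask final : public mozilla::Runnable,
+//                               public nsIIdleRunnable {
+//  public:
+//   NS_DECL_ISUPPORTS_INHERITED
+//   ExampleIdleTask() : mozilla::Runnable("ExampleIdleTask") {}
+//   void SetDeadline(mozilla::TimeStamp aDeadline) override {
+//     mDeadline = aDeadline;
+//   }
+//   NS_IMETHOD Run() override {
+//     // A null deadline means "no limit"; otherwise stop in time for the
+//     // thread to get back to servicing regular events.
+//     while (HasWork() &&
+//            (mDeadline.IsNull() || mozilla::TimeStamp::Now() < mDeadline)) {
+//       DoSomeWork();
+//     }
+//     return NS_OK;
+//   }
+//  private:
+//   ~ExampleIdleTask() = default;
+//   mozilla::TimeStamp mDeadline;
+// };
+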
+#endif // nsIIdleRunnable_h__
diff --git a/xpcom/threads/nsINamed.idl b/xpcom/threads/nsINamed.idl
new file mode 100644
index 0000000000..cdb7d88f30
--- /dev/null
+++ b/xpcom/threads/nsINamed.idl
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * Represents an object with a name, such as a runnable or a timer.
+ */
+
+[scriptable, uuid(0c5fe7de-7e83-4d0d-a8a6-4a6518b9a7b3)]
+interface nsINamed : nsISupports
+{
+ /*
+ * A string describing the purpose of the runnable/timer/whatever. Useful
+   * for debugging. This attribute is read-only as exposed here; concrete
+   * implementations typically accept a compile-time string literal through
+   * their own SetName methods.
+ *
+ * WARNING: This attribute will be included in telemetry, so it should
+ * never contain privacy sensitive information.
+ */
+ readonly attribute AUTF8String name;
+};
diff --git a/xpcom/threads/nsIProcess.idl b/xpcom/threads/nsIProcess.idl
new file mode 100644
index 0000000000..c15ded7a2f
--- /dev/null
+++ b/xpcom/threads/nsIProcess.idl
@@ -0,0 +1,112 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIFile;
+interface nsIObserver;
+
+[scriptable, uuid(609610de-9954-4a63-8a7c-346350a86403)]
+interface nsIProcess : nsISupports
+{
+ /**
+ * Initialises the process with an executable to be run. Call the run method
+ * to run the executable.
+ * @param executable The executable to run.
+ */
+ void init(in nsIFile executable);
+
+ /**
+   * Kills the running process. When this method returns, the process will
+   * either have been killed or a failure will have been reported.
+ */
+ void kill();
+
+ /**
+ * Executes the file this object was initialized with
+ * @param blocking Whether to wait until the process terminates before
+   *        returning or not.
+ * @param args An array of arguments to pass to the process in the
+ * native character set.
+ * @param count The length of the args array.
+ */
+ void run(in boolean blocking, [array, size_is(count)] in string args,
+ in unsigned long count);
+
+ /**
+ * Executes the file this object was initialized with optionally calling
+ * an observer after the process has finished running.
+ * @param args An array of arguments to pass to the process in the
+ * native character set.
+ * @param count The length of the args array.
+ * @param observer An observer to notify when the process has completed. It
+ * will receive this process instance as the subject and
+ * "process-finished" or "process-failed" as the topic. The
+ * observer will be notified on the main thread.
+ * @param holdWeak Whether to use a weak reference to hold the observer.
+ */
+ void runAsync([array, size_is(count)] in string args, in unsigned long count,
+ [optional] in nsIObserver observer, [optional] in boolean holdWeak);
+
+ /**
+ * Executes the file this object was initialized with
+ * @param blocking Whether to wait until the process terminates before
+   *        returning or not.
+ * @param args An array of arguments to pass to the process in UTF-16
+ * @param count The length of the args array.
+ */
+ void runw(in boolean blocking, [array, size_is(count)] in wstring args,
+ in unsigned long count);
+
+ /**
+ * Executes the file this object was initialized with optionally calling
+ * an observer after the process has finished running.
+ * @param args An array of arguments to pass to the process in UTF-16
+ * @param count The length of the args array.
+ * @param observer An observer to notify when the process has completed. It
+ * will receive this process instance as the subject and
+ * "process-finished" or "process-failed" as the topic. The
+ * observer will be notified on the main thread.
+ * @param holdWeak Whether to use a weak reference to hold the observer.
+ */
+ void runwAsync([array, size_is(count)] in wstring args,
+ in unsigned long count,
+ [optional] in nsIObserver observer, [optional] in boolean holdWeak);
+
+ /**
+ * When set to true the process will not open a new window when started and
+ * will run hidden from the user. This currently affects only the Windows
+ * platform.
+ */
+ attribute boolean startHidden;
+
+ /**
+ * When set to true the process will be launched directly without using the
+ * shell. This currently affects only the Windows platform.
+ */
+ attribute boolean noShell;
+
+ /**
+ * The process identifier of the currently running process. This will only
+ * be available after the process has started and may not be available on
+ * some platforms.
+ */
+ readonly attribute unsigned long pid;
+
+ /**
+ * The exit value of the process. This is only valid after the process has
+ * exited.
+ */
+ readonly attribute long exitValue;
+
+ /**
+ * Returns whether the process is currently running or not.
+ */
+ readonly attribute boolean isRunning;
+};
+
+%{C++
+
+#define NS_PROCESS_CONTRACTID "@mozilla.org/process/util;1"
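+
+// A usage sketch (illustrative; RunToolAsync and the "--version" argument
+// are hypothetical, and error handling is minimal):
+//
+// nsresult RunToolAsync(nsIFile* aExe, nsIObserver* aObserver) {
+//   nsresult rv;
+//   nsCOMPtr<nsIProcess> process =
+//       do_CreateInstance(NS_PROCESS_CONTRACTID, &rv);
+//   NS_ENSURE_SUCCESS(rv, rv);
+//   rv = process->Init(aExe);
+//   NS_ENSURE_SUCCESS(rv, rv);
+//   // Arguments are in the native character set; the observer is notified
+//   // on the main thread with "process-finished" or "process-failed".
+//   const char* args[] = {"--version"};
+//   return process->RunAsync(args, 1, aObserver, /* holdWeak */ false);
+// }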
+%}
diff --git a/xpcom/threads/nsIRunnable.idl b/xpcom/threads/nsIRunnable.idl
new file mode 100644
index 0000000000..bfe9669a9f
--- /dev/null
+++ b/xpcom/threads/nsIRunnable.idl
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * Represents a task which can be dispatched to a thread for execution.
+ */
+
+[scriptable, function, uuid(4a2abaf0-6886-11d3-9382-00104ba0fd40)]
+interface nsIRunnable : nsISupports
+{
+ /**
+ * The function implementing the task to be run.
+ */
+ void run();
+};
+
+[scriptable, uuid(e75aa42a-80a9-11e6-afb5-e89d87348e2c)]
+interface nsIRunnablePriority : nsISupports
+{
+ const unsigned long PRIORITY_IDLE = 0;
+ const unsigned long PRIORITY_DEFERRED_TIMERS = 1;
+ const unsigned long PRIORITY_LOW = 2;
+ // INPUT_LOW isn't supposed to be used directly.
+ // const unsigned long PRIORITY_INPUT_LOW = 3;
+ const unsigned long PRIORITY_NORMAL = 4;
+ const unsigned long PRIORITY_MEDIUMHIGH = 5;
+ const unsigned long PRIORITY_INPUT_HIGH = 6;
+ const unsigned long PRIORITY_VSYNC = 7;
+ // INPUT_HIGHEST is InputTaskManager's internal priority
+ //const unsigned long PRIORITY_INPUT_HIGHEST = 8;
+ const unsigned long PRIORITY_RENDER_BLOCKING = 9;
+ const unsigned long PRIORITY_CONTROL = 10;
+
+ readonly attribute unsigned long priority;
+};
+
+[uuid(3114c36c-a482-4c6e-9523-1dcfc6f605b9)]
+interface nsIRunnableIPCMessageType : nsISupports
+{
+ readonly attribute unsigned long type;
+};
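+
+// A minimal sketch (illustrative, assuming mozilla::Runnable from
+// nsThreadUtils.h): a runnable that reports PRIORITY_LOW so prioritizing
+// schedulers order it behind normal work. The matching
+// NS_IMPL_ISUPPORTS_INHERITED(LowPriorityTask, Runnable, nsIRunnablePriority)
+// would live in a .cpp file.
+//
+// class LowPriorityTask final : public mozilla::Runnable,
+//                               public nsIRunnablePriority {
+//  public:
+//   NS_DECL_ISUPPORTS_INHERITED
+//   LowPriorityTask() : mozilla::Runnable("LowPriorityTask") {}
+//   NS_IMETHOD Run() override { /* deferred work */ return NS_OK; }
+//   NS_IMETHOD GetPriority(uint32_t* aPriority) override {
+//     *aPriority = nsIRunnablePriority::PRIORITY_LOW;
+//     return NS_OK;
+//   }
+//  private:
+//   ~LowPriorityTask() = default;
+// };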
diff --git a/xpcom/threads/nsISerialEventTarget.idl b/xpcom/threads/nsISerialEventTarget.idl
new file mode 100644
index 0000000000..9cf7768b37
--- /dev/null
+++ b/xpcom/threads/nsISerialEventTarget.idl
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIEventTarget.idl"
+
+/**
+ * A serial event target is an event dispatching interface like
+ * nsIEventTarget. Runnables dispatched to an nsISerialEventTarget are required
+ * to execute serially. That is, two different runnables dispatched to the
+ * target should never be allowed to execute simultaneously. One exception to
+ * this rule is nested event loops. If a runnable spins a nested event loop,
+ * causing another runnable dispatched to the target to run, the target may
+ * still be considered "serial".
+ *
+ * Examples:
+ * - nsIThread is a serial event target.
+ * - Thread pools are not serial event targets.
+ * - However, one can "convert" a thread pool into an nsISerialEventTarget
+ * by putting a TaskQueue in front of it.
+ */
+[builtinclass, scriptable, uuid(9f982380-24b4-49f3-88f6-45e2952036c7)]
+interface nsISerialEventTarget : nsIEventTarget
+{
+};
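+
+// A sketch of the TaskQueue conversion mentioned above (assumes
+// mozilla/TaskQueue.h from this directory; aThreadPool stands for an
+// existing nsIEventTarget-backed pool). The queue dispatches its runnables
+// to the pool but never runs two at once, satisfying the serial contract.
+//
+// RefPtr<mozilla::TaskQueue> queue = mozilla::TaskQueue::Create(
+//     do_AddRef(aThreadPool), "ExampleSerialQueue");
+// nsCOMPtr<nsISerialEventTarget> serial = queue;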
diff --git a/xpcom/threads/nsISupportsPriority.idl b/xpcom/threads/nsISupportsPriority.idl
new file mode 100644
index 0000000000..d0b8b9a3dd
--- /dev/null
+++ b/xpcom/threads/nsISupportsPriority.idl
@@ -0,0 +1,45 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * This interface exposes the general notion of a scheduled object with an
+ * integral priority value. Following UNIX conventions, smaller (and possibly
+ * negative) values have higher priority.
+ *
+ * This interface does not strictly define what happens when the priority of an
+ * object is changed. An implementation of this interface is free to define
+ * the side-effects of changing the priority of an object. In some cases,
+ * changing the priority of an object may be disallowed (resulting in an
+ * exception being thrown) or may simply be ignored.
+ */
+[scriptable, uuid(aa578b44-abd5-4c19-8b14-36d4de6fdc36)]
+interface nsISupportsPriority : nsISupports
+{
+ /**
+ * Typical priority values.
+ */
+ const long PRIORITY_HIGHEST = -20;
+ const long PRIORITY_HIGH = -10;
+ const long PRIORITY_NORMAL = 0;
+ const long PRIORITY_LOW = 10;
+ const long PRIORITY_LOWEST = 20;
+
+ /**
+ * This attribute may be modified to change the priority of this object. The
+ * implementation of this interface is free to truncate a given priority
+ * value to whatever limits are appropriate. Typically, this attribute is
+ * initialized to PRIORITY_NORMAL, but implementations may choose to assign a
+ * different initial value.
+ */
+ attribute long priority;
+
+ /**
+ * This method adjusts the priority attribute by a given delta. It helps
+ * reduce the amount of coding required to increment or decrement the value
+ * of the priority attribute.
+ */
+ void adjustPriority(in long delta);
+};
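+
+// A usage sketch (illustrative; aObject stands for any nsISupports):
+// lowering the priority of an object that happens to support
+// nsISupportsPriority. A failed QI simply means the object does not expose
+// a priority.
+//
+// nsCOMPtr<nsISupportsPriority> p = do_QueryInterface(aObject);
+// if (p) {
+//   // Larger values mean lower priority, per the UNIX convention above.
+//   p->SetPriority(nsISupportsPriority::PRIORITY_LOW);
+// }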
diff --git a/xpcom/threads/nsITargetShutdownTask.h b/xpcom/threads/nsITargetShutdownTask.h
new file mode 100644
index 0000000000..09ac3c5e5f
--- /dev/null
+++ b/xpcom/threads/nsITargetShutdownTask.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef XPCOM_THREADS_NSITARGETSHUTDOWNTASK_H_
+#define XPCOM_THREADS_NSITARGETSHUTDOWNTASK_H_
+
+#include "nsISupports.h"
+#include "nsIEventTarget.h"
+#include "nsThreadUtils.h"
+
+#define NS_ITARGETSHUTDOWNTASK_IID \
+ { \
+ 0xb08647aa, 0xcfb5, 0x4630, { \
+ 0x8e, 0x26, 0x9a, 0xbe, 0xb3, 0x3f, 0x08, 0x40 \
+ } \
+ }
+
+class NS_NO_VTABLE nsITargetShutdownTask : public nsISupports {
+ public:
+  NS_DECLARE_STATIC_IID_ACCESSOR(NS_ITARGETSHUTDOWNTASK_IID)
+
+ virtual void TargetShutdown() = 0;
+
+ already_AddRefed<nsIRunnable> AsRunnable() {
+ // FIXME: Try QI to nsINamed if available?
+ return mozilla::NewRunnableMethod("nsITargetShutdownTask::TargetShutdown",
+ this,
+ &nsITargetShutdownTask::TargetShutdown);
+ }
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsITargetShutdownTask, NS_ITARGETSHUTDOWNTASK_IID)
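+
+// A minimal sketch (illustrative, not part of this header; assumes
+// <functional>): a shutdown task that runs an arbitrary cleanup function on
+// the target thread. Register it with the owning target's
+// registerShutdownTask (see nsIEventTarget.idl); the matching
+// NS_IMPL_ISUPPORTS would live in a .cpp file.
+//
+// class FunctionShutdownTask final : public nsITargetShutdownTask {
+//  public:
+//   NS_DECL_THREADSAFE_ISUPPORTS
+//   explicit FunctionShutdownTask(std::function<void()> aFn)
+//       : mFn(std::move(aFn)) {}
+//   void TargetShutdown() override { mFn(); }
+//  private:
+//   ~FunctionShutdownTask() = default;
+//   std::function<void()> mFn;
+// };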
+
+#endif
diff --git a/xpcom/threads/nsIThread.idl b/xpcom/threads/nsIThread.idl
new file mode 100644
index 0000000000..e6735d5d64
--- /dev/null
+++ b/xpcom/threads/nsIThread.idl
@@ -0,0 +1,222 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISerialEventTarget.idl"
+#include "nsIThreadShutdown.idl"
+
+%{C++
+#include "mozilla/AlreadyAddRefed.h"
+
+namespace mozilla {
+class TimeStamp;
+class TimeDurationValueCalculator;
+template <typename T> class BaseTimeDuration;
+typedef BaseTimeDuration<TimeDurationValueCalculator> TimeDuration;
+enum class EventQueuePriority;
+}
+%}
+
+[ptr] native PRThread(PRThread);
+native EventQueuePriority(mozilla::EventQueuePriority);
+
+native nsIEventTargetPtr(nsIEventTarget*);
+native nsISerialEventTargetPtr(nsISerialEventTarget*);
+native TimeStamp(mozilla::TimeStamp);
+native TimeDuration(mozilla::TimeDuration);
+
+/**
+ * This interface provides a high-level abstraction for an operating system
+ * thread.
+ *
+ * Threads have a built-in event queue, and a thread is an event target that
+ * can receive nsIRunnable objects (events) to be processed on the thread.
+ *
+ * See nsIThreadManager for the API used to create and locate threads.
+ */
+[builtinclass, scriptable, uuid(5801d193-29d1-4964-a6b7-70eb697ddf2b)]
+interface nsIThread : nsISerialEventTarget
+{
+ /**
+ * @returns
+ * The NSPR thread object corresponding to this nsIThread.
+ */
+ [noscript] readonly attribute PRThread PRThread;
+
+ /**
+ * @returns
+ * Whether or not this thread may call into JS. Used in the profiler
+ * to avoid some unnecessary locking.
+ */
+ [noscript] attribute boolean CanInvokeJS;
+
+ /**
+ * Thread QoS priorities. Currently only supported on MacOS.
+ */
+
+ cenum QoSPriority : 32 {
+ QOS_PRIORITY_NORMAL,
+ QOS_PRIORITY_LOW
+ };
+
+ /**
+   * Shut down the thread. This method prevents further dispatch of events to
+ * the thread, and it causes any pending events to run to completion before
+ * the thread joins (see PR_JoinThread) with the current thread. During this
+ * method call, events for the current thread may be processed.
+ *
+ * This method MAY NOT be executed from the thread itself. Instead, it is
+ * meant to be executed from another thread (usually the thread that created
+ * this thread or the main application thread). When this function returns,
+ * the thread will be shutdown, and it will no longer be possible to dispatch
+ * events to the thread.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * the current thread, that this thread was not created with a call to
+ * nsIThreadManager::NewThread, or if this method was called more than once
+ * on the thread object.
+ */
+ void shutdown();
+
+ /**
+ * This method may be called to determine if there are any events ready to be
+ * processed. It may only be called when this thread is the current thread.
+ *
+ * Because events may be added to this thread by another thread, a "false"
+ * result does not mean that this thread has no pending events. It only
+ * means that there were no pending events when this method was called.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that this thread has one or
+ * more pending events.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * not the current thread.
+ */
+ boolean hasPendingEvents();
+
+ /**
+ * Similar to above, but checks only possible high priority queue.
+   * Similar to the above, but checks only the high-priority queue, if any.
+ boolean hasPendingHighPriorityEvents();
+
+ /**
+ * Process the next event. If there are no pending events, then this method
+ * may wait -- depending on the value of the mayWait parameter -- until an
+ * event is dispatched to this thread. This method is re-entrant but may
+ * only be called if this thread is the current thread.
+ *
+ * @param mayWait
+ * A boolean parameter that if "true" indicates that the method may block
+ * the calling thread to wait for a pending event.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that an event was processed.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * not the current thread.
+ */
+ boolean processNextEvent(in boolean mayWait);
+
+ /**
+   * Shut down the thread asynchronously. This method immediately prevents
+ * further dispatch of events to the thread, and it causes any pending events
+ * to run to completion before this thread joins with the current thread.
+ *
+ * UNLIKE shutdown() this does not process events on the current thread.
+ * Instead it merely ensures that the current thread continues running until
+ * this thread has shut down.
+ *
+ * This method MAY NOT be executed from the thread itself. Instead, it is
+ * meant to be executed from another thread (usually the thread that created
+ * this thread or the main application thread). When this function returns,
+ * the thread will continue running until it exhausts its event queue.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * the current thread, that this thread was not created with a call to
+ * nsIThreadManager::NewNamedThread, or that this method was called more
+ * than once on the thread object.
+ */
+ void asyncShutdown();
+
+ /**
+ * Like `asyncShutdown`, but also returns a nsIThreadShutdown instance to
+ * allow observing and controlling the thread's async shutdown progress.
+ */
+ nsIThreadShutdown beginShutdown();
+
+ /**
+ * Dispatch an event to a specified queue for the thread. This function
+ * may be called from any thread, and it may be called re-entrantly.
+ * Most users should use the NS_Dispatch*() functions in nsThreadUtils instead
+ * of calling this directly.
+ *
+ * @param event
+ * The alreadyAddRefed<> event to dispatch.
+ * NOTE that the event will be leaked if it fails to dispatch.
+ * @param queue
+ * Which event priority queue this should be added to
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched.
+ */
+ [noscript] void dispatchToQueue(in alreadyAddRefed_nsIRunnable event,
+ in EventQueuePriority queue);
+
+ /**
+ * This is set to the end of the last 50+ms event that was executed on
+   * This is set to the end time of the last event that ran for 50ms or more
+   * on this thread (main thread only). Otherwise returns a null TimeStamp.
+ [noscript] readonly attribute TimeStamp lastLongTaskEnd;
+ [noscript] readonly attribute TimeStamp lastLongNonIdleTaskEnd;
+
+ /**
+ * Get information on the timing of the currently-running event.
+ *
+ * @param delay
+   * The amount of time the currently running event in the specified queue
+   * waited to run. Will be TimeDuration() if the queue is empty or has not run
+   * any new events since event delay monitoring started. NOTE: delay will be
+ * TimeDuration() if this thread uses a PrioritizedEventQueue (i.e. MainThread)
+ * and the event priority is below Input.
+ * @param start
+ * The time the currently running event began to run, or TimeStamp() if no
+ * event is running.
+ */
+ [noscript] void getRunningEventDelay(out TimeDuration delay, out TimeStamp start);
+
+ /**
+ * Set information on the timing of the currently-running event.
+ * Overrides the values returned by getRunningEventDelay
+ *
+ * @param delay
+ * Delay the running event spent in queues, or TimeDuration() if
+ * there's no running event.
+ * @param start
+ * The time the currently running event began to run, or TimeStamp() if no
+ * event is running.
+ */
+ [noscript] void setRunningEventDelay(in TimeDuration delay, in TimeStamp start);
+
+ [noscript] void setNameForWakeupTelemetry(in ACString name);
+
+ /**
+ * Set the QoS priority of threads where this may be available. Currently
+   * restricted to MacOS. This method must be called on the thread itself.
+ *
+ * @param aPriority
+ * The specified priority we will adjust to. Can be low (background) or
+ * normal (default / user-interactive)
+ */
+ [noscript] void setThreadQoS(in nsIThread_QoSPriority aPriority);
+
+};
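+
+// A usage sketch (illustrative, assuming NS_NewNamedThread and
+// NS_NewRunnableFunction from nsThreadUtils.h): create a thread, run one
+// task on it, then shut it down from the creating thread.
+//
+// nsCOMPtr<nsIThread> thread;
+// if (NS_SUCCEEDED(NS_NewNamedThread("Example", getter_AddRefs(thread)))) {
+//   thread->Dispatch(NS_NewRunnableFunction("Example::Task", [] {
+//     // Runs on the "Example" thread.
+//   }), NS_DISPATCH_NORMAL);
+//   // Processes events on this thread until "Example" has joined.
+//   thread->Shutdown();
+// }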
diff --git a/xpcom/threads/nsIThreadInternal.idl b/xpcom/threads/nsIThreadInternal.idl
new file mode 100644
index 0000000000..fca9e711b0
--- /dev/null
+++ b/xpcom/threads/nsIThreadInternal.idl
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIThread.idl"
+
+interface nsIRunnable;
+interface nsIThreadObserver;
+
+/**
+ * The XPCOM thread object implements this interface, which allows a consumer
+ * to observe dispatch activity on the thread.
+ */
+[builtinclass, scriptable, uuid(a3a72e5f-71d9-4add-8f30-59a78fb6d5eb)]
+interface nsIThreadInternal : nsIThread
+{
+ /**
+ * Get/set the current thread observer (may be null). This attribute may be
+ * read from any thread, but must only be set on the thread corresponding to
+ * this thread object. The observer will be released on the thread
+ * corresponding to this thread object after all other events have been
+ * processed during a call to Shutdown.
+ */
+ attribute nsIThreadObserver observer;
+
+ /**
+   * Add an observer that will *only* receive onProcessNextEvent and
+   * afterProcessNextEvent callbacks. Always called
+ * on the target thread, and the implementation does not have to be
+ * threadsafe. Order of callbacks is not guaranteed (i.e.
+ * afterProcessNextEvent may be called first depending on whether or not the
+ * observer is added in a nested loop). Holds a strong ref.
+ */
+ void addObserver(in nsIThreadObserver observer);
+
+ /**
+ * Remove an observer added via the addObserver call. Once removed the
+ * observer will never be called again by the thread.
+ */
+ void removeObserver(in nsIThreadObserver observer);
+};
+
+/**
+ * This interface provides the observer with hooks to implement a layered
+ * event queue. For example, it is possible to overlay processing events
+ * for a GUI toolkit on top of the events for a thread:
+ *
+ * var NativeQueue;
+ * Observer = {
+ * onDispatchedEvent() {
+ * NativeQueue.signal();
+ * }
+ * onProcessNextEvent(thread, mayWait) {
+ * if (NativeQueue.hasNextEvent())
+ * NativeQueue.processNextEvent();
+ * while (mayWait && !thread.hasPendingEvent()) {
+ * NativeQueue.wait();
+ * NativeQueue.processNextEvent();
+ * }
+ * }
+ * };
+ *
+ * NOTE: The implementation of this interface must be threadsafe.
+ *
+ * NOTE: It is valid to change the thread's observer during a call to an
+ * observer method.
+ *
+ * NOTE: Will be split into two interfaces soon: one for onProcessNextEvent and
+ * afterProcessNextEvent, then another that inherits the first and adds
+ * onDispatchedEvent.
+ */
+[uuid(cc8da053-1776-44c2-9199-b5a629d0a19d)]
+interface nsIThreadObserver : nsISupports
+{
+ /**
+ * This method is called after an event has been dispatched to the thread.
+ * This method may be called from any thread.
+ */
+ void onDispatchedEvent();
+
+ /**
+ * This method is called when nsIThread::ProcessNextEvent is called. It does
+ * not guarantee that an event is actually going to be processed. This method
+ * is only called on the target thread.
+ *
+ * @param thread
+ * The thread being asked to process another event.
+ * @param mayWait
+ * Indicates whether or not the method is allowed to block the calling
+ * thread. For example, this parameter is false during thread shutdown.
+ */
+ void onProcessNextEvent(in nsIThreadInternal thread, in boolean mayWait);
+
+ /**
+ * This method is called (from nsIThread::ProcessNextEvent) after an event
+ * is processed. It does not guarantee that an event was actually processed
+   * (this depends on the value of |eventWasProcessed|). This method is only
+   * called on the target thread. DO NOT EVER RUN SCRIPT FROM THIS CALLBACK!!!
+ *
+ * @param thread
+ * The thread that processed another event.
+ * @param eventWasProcessed
+ * Indicates whether an event was actually processed. May be false if the
+ * |mayWait| flag was false when calling nsIThread::ProcessNextEvent().
+ */
+ void afterProcessNextEvent(in nsIThreadInternal thread,
+ in bool eventWasProcessed);
+};
diff --git a/xpcom/threads/nsIThreadManager.idl b/xpcom/threads/nsIThreadManager.idl
new file mode 100644
index 0000000000..1001436364
--- /dev/null
+++ b/xpcom/threads/nsIThreadManager.idl
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+[ptr] native PRThread(PRThread);
+ native ThreadCreationOptions(nsIThreadManager::ThreadCreationOptions);
+
+interface nsIEventTarget;
+interface nsIRunnable;
+interface nsIThread;
+
+[scriptable, function, uuid(039a227d-0cb7-44a5-a8f9-dbb7071979f2)]
+interface nsINestedEventLoopCondition : nsISupports
+{
+ /**
+ * Returns true if the current nested event loop should stop spinning.
+ */
+ bool isDone();
+};
+
+/**
+ * An interface for creating and locating nsIThread instances.
+ */
+[scriptable, uuid(1be89eca-e2f7-453b-8d38-c11ba247f6f3)]
+interface nsIThreadManager : nsISupports
+{
+ /**
+ * Default number of bytes reserved for a thread's stack, if no stack size
+ * is specified in newThread().
+ *
+ * Defaults can be a little overzealous for many platforms.
+ *
+ * On Linux and OS X, for instance, the default thread stack size is whatever
+ * getrlimit(RLIMIT_STACK) returns, which is often set at 8MB. Or, on Linux,
+ * if the stack size is unlimited, we fall back to 2MB. This causes particular
+ * problems on Linux, which allocates 2MB huge VM pages, and will often
+ * immediately allocate them for any stacks which are 2MB or larger.
+ *
+ * The default on Windows is 1MB, which is a little more reasonable. But the
+ * vast majority of our threads don't need anywhere near that much space.
+ *
+ * ASan, TSan and non-opt builds, however, often need a bit more, so give
+ * them the platform default.
+ */
+%{C++
+#if defined(MOZ_ASAN) || defined(MOZ_TSAN) || !defined(__OPTIMIZE__)
+ static constexpr uint32_t DEFAULT_STACK_SIZE = 0;
+#else
+ static constexpr uint32_t DEFAULT_STACK_SIZE = 256 * 1024;
+#endif
+
+ static const uint32_t kThreadPoolStackSize = DEFAULT_STACK_SIZE;
+
+ struct ThreadCreationOptions {
+ // The size in bytes to reserve for the thread's stack. A value of `0` means
+ // to use the platform default.
+ uint32_t stackSize = nsIThreadManager::DEFAULT_STACK_SIZE;
+
+ // If set to `true`, any attempts to dispatch runnables to this thread
+ // without `DISPATCH_IGNORE_BLOCK_DISPATCH` will fail.
+ //
+ // This is intended to be used for threads which are expected to generally
+ // only service a single runnable (other than thread lifecycle runnables),
+ // and perform their own event dispatching internaly, such as thread pool
+    // and perform their own event dispatching internally, such as thread pool
+ bool blockDispatch = false;
+
+ // (Windows-only) Whether the thread should have a MessageLoop capable of
+ // processing native UI events. Defaults to false.
+ bool isUiThread = false;
+ };
+%}
+
+ /**
+ * Create a new thread (a global, user PRThread) with the specified name.
+ *
+ * @param name
+ * The name of the thread. If it is empty the thread will not be named.
+ * @param options
+ * Configuration options for the newly created thread.
+ *
+ * @returns
+ * The newly created nsIThread object.
+ */
+ [noscript] nsIThread newNamedThread(in ACString name, in ThreadCreationOptions options);
+
+ /**
+ * Get the main thread.
+ */
+ readonly attribute nsIThread mainThread;
+
+ /**
+ * Get the current thread. If the calling thread does not already have a
+ * nsIThread associated with it, then a new nsIThread will be created and
+ * associated with the current PRThread.
+ */
+ readonly attribute nsIThread currentThread;
+
+ /**
+ * This queues a runnable to the main thread. It's a shortcut for JS callers
+ * to be used instead of
+ * .mainThread.dispatch(runnable, Ci.nsIEventTarget.DISPATCH_NORMAL);
+ * or
+ * .currentThread.dispatch(runnable, Ci.nsIEventTarget.DISPATCH_NORMAL);
+ * C++ callers should instead use NS_DispatchToMainThread.
+ */
+ [optional_argc]
+ void dispatchToMainThread(in nsIRunnable event, [optional] in uint32_t priority);
+
+ /**
+   * Similar to dispatchToMainThread, but wraps the event in an extra
+   * runnable that allocates an nsAutoMicroTask.
+ */
+ [optional_argc]
+ void dispatchToMainThreadWithMicroTask(in nsIRunnable event, [optional] in uint32_t priority);
+
+ /**
+ * This queues a runnable to the main thread's idle queue.
+ *
+ * @param event
+ * The event to dispatch.
+ * @param timeout
+ * The time in milliseconds until this event should be moved from the idle
+ * queue to the regular queue if it hasn't been executed by then. If not
+ * passed or a zero value is specified, the event will never be moved to
+ * the regular queue.
+ */
+ void idleDispatchToMainThread(in nsIRunnable event,
+ [optional] in uint32_t timeout);
+
+ /*
+ * A helper method to dispatch a task through nsIDirectTaskDispatcher to the
+ * current thread.
+ */
+ void dispatchDirectTaskToCurrentThread(in nsIRunnable event);
+
+ /**
+ * Enter a nested event loop on the current thread, waiting on, and
+ * processing events until condition.isDone() returns true.
+ *
+ * If condition.isDone() throws, this function will throw as well.
+ *
+ * C++ code should not use this function, instead preferring
+ * mozilla::SpinEventLoopUntil.
+ */
+ void spinEventLoopUntil(in ACString aVeryGoodReasonToDoThis, in nsINestedEventLoopCondition condition);
+
+ /**
+ * Similar to the previous method, but the spinning of the event loop
+   * terminates when application shutdown begins.
+ *
+ * C++ code should not use this function, instead preferring
+ * mozilla::SpinEventLoopUntil.
+ */
+ void spinEventLoopUntilOrQuit(in ACString aVeryGoodReasonToDoThis, in nsINestedEventLoopCondition condition);
+
+ /**
+ * Spin the current thread's event loop until there are no more pending
+ * events. This could be done with spinEventLoopUntil, but that would
+ * require access to the current thread from JavaScript, which we are
+ * moving away from.
+ */
+ void spinEventLoopUntilEmpty();
+
+ /**
+ * Return the EventTarget for the main thread.
+ */
+ readonly attribute nsIEventTarget mainThreadEventTarget;
+};
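+
+// A sketch of the C++ alternative mentioned above (SpinEventLoopUntil.h;
+// StartAsyncWork is hypothetical): spin the current thread's event loop
+// until a condition holds, with a diagnostic reason string.
+//
+// bool done = false;
+// StartAsyncWork([&] { done = true; });
+// mozilla::SpinEventLoopUntil("example:wait-for-done"_ns,
+//                             [&] { return done; });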
diff --git a/xpcom/threads/nsIThreadPool.idl b/xpcom/threads/nsIThreadPool.idl
new file mode 100644
index 0000000000..65162779fa
--- /dev/null
+++ b/xpcom/threads/nsIThreadPool.idl
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIEventTarget.idl"
+#include "nsIThread.idl"
+
+[uuid(ef194cab-3f86-4b61-b132-e5e96a79e5d1)]
+interface nsIThreadPoolListener : nsISupports
+{
+ /**
+ * Called when a new thread is created by the thread pool. The notification
+ * happens on the newly-created thread.
+ */
+ void onThreadCreated();
+
+ /**
+ * Called when a thread is about to be destroyed by the thread pool. The
+ * notification happens on the thread that is about to be destroyed.
+ */
+ void onThreadShuttingDown();
+};
+
+/**
+ * An interface to a thread pool. A thread pool creates a limited number of
+ * anonymous (unnamed) worker threads. An event dispatched to the thread pool
+ * will be run on the next available worker thread.
+ */
+[uuid(76ce99c9-8e43-489a-9789-f27cc4424965)]
+interface nsIThreadPool : nsIEventTarget
+{
+ /**
+ * Set the entire pool's QoS priority. If the priority has not changed, do nothing.
+ * Existing threads will update their QoS priority the next time they become
+ * active, and newly created threads will set this QoS priority upon creation.
+ */
+ [noscript] void setQoSForThreads(in nsIThread_QoSPriority aPriority);
+
+ /**
+   * Shut down the thread pool. This method may not be executed from any thread
+ * in the thread pool. Instead, it is meant to be executed from another
+ * thread (usually the thread that created this thread pool). When this
+ * function returns, the thread pool and all of its threads will be shutdown,
+ * and it will no longer be possible to dispatch tasks to the thread pool.
+ *
+ * As a side effect, events on the current thread will be processed.
+ */
+ void shutdown();
+
+ /**
+   * Shut down the thread pool, but wait only for aTimeoutMs. After the timeout
+ * expires, any threads that have not shutdown yet are leaked and will not
+ * block shutdown.
+ *
+   * This method should only be used during shutdown, to clean up threads that
+ * made blocking calls to code outside our control, and can't be safely
+ * terminated. We choose to leak them intentionally to avoid a shutdown hang.
+ */
+ [noscript] void shutdownWithTimeout(in long aTimeoutMs);
+
+ /**
+ * Get/set the maximum number of threads allowed at one time in this pool.
+ */
+ attribute unsigned long threadLimit;
+
+ /**
+ * Get/set the maximum number of idle threads kept alive.
+ */
+ attribute unsigned long idleThreadLimit;
+
+ /**
+ * Get/set the amount of time in milliseconds before an idle thread is
+ * destroyed.
+ */
+ attribute unsigned long idleThreadTimeout;
+
+ /**
+   * If set to true, the idle timeout will be calculated as idleThreadTimeout
+   * divided by the number of currently idle threads. This can save memory
+   * allocations while still keeping a reasonable number of idle threads.
+   * The default is false: |idleThreadTimeout| applies to all threads.
+ */
+ attribute boolean idleThreadTimeoutRegressive;
+
+ /**
+ * Get/set the number of bytes reserved for the stack of all threads in
+ * the pool. By default this is nsIThreadManager::DEFAULT_STACK_SIZE.
+ */
+ attribute unsigned long threadStackSize;
+
+ /**
+ * An optional listener that will be notified when a thread is created or
+ * destroyed in the course of the thread pool's operation.
+ *
+ * A listener will only receive notifications about threads created after the
+   * listener is set, so it is recommended that the consumer set the listener
+ * before dispatching the first event. A listener that receives an
+ * onThreadCreated() notification is guaranteed to always receive the
+ * corresponding onThreadShuttingDown() notification.
+ *
+ * The thread pool takes ownership of the listener and releases it when the
+ * shutdown() method is called. Threads created after the listener is set will
+ * also take ownership of the listener so that the listener will be kept alive
+ * long enough to receive the guaranteed onThreadShuttingDown() notification.
+ */
+ attribute nsIThreadPoolListener listener;
+
+ /**
+ * Set the label for threads in the pool. All threads will be named
+ * "<aName> #<n>", where <n> is a serial number.
+ */
+ void setName(in ACString aName);
+};
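+
+// A usage sketch (illustrative; assumes the conventional
+// "@mozilla.org/thread-pool;1" contract registered by XPCOM, and
+// someRunnable stands for any nsIRunnable): create a pool, bound its size,
+// and label its threads.
+//
+// nsCOMPtr<nsIThreadPool> pool =
+//     do_CreateInstance("@mozilla.org/thread-pool;1");
+// pool->SetThreadLimit(4);
+// pool->SetIdleThreadLimit(1);
+// pool->SetName("ExamplePool"_ns);  // threads named "ExamplePool #1", ...
+// pool->Dispatch(someRunnable, nsIEventTarget::DISPATCH_NORMAL);
+// ...
+// pool->Shutdown();  // later, from a non-pool thread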
diff --git a/xpcom/threads/nsIThreadShutdown.idl b/xpcom/threads/nsIThreadShutdown.idl
new file mode 100644
index 0000000000..a08d64165b
--- /dev/null
+++ b/xpcom/threads/nsIThreadShutdown.idl
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIRunnable;
+
+/**
+ * Handle for the ongoing shutdown progress of a given thread which can be used
+ * to observe and interrupt async shutdown progress. Methods on this interface
+ * may generally only be used on the thread which called
+ * `nsIThread::beginShutdown`.
+ */
+[scriptable, builtinclass, uuid(70a43748-6130-4ea6-a440-7c74e1b7dd7c)]
+interface nsIThreadShutdown : nsISupports
+{
+ /**
+ * Register a runnable to be executed when the thread has completed shutdown,
+ * or shutdown has been cancelled due to `stopWaitingAndLeakThread()`.
+ *
+ * If the thread has already completed or cancelled shutdown, the runnable
+ * may be executed synchronously.
+ *
+ * May only be called on the thread which invoked `nsIThread::beginShutdown`.
+ */
+ void onCompletion(in nsIRunnable aEvent);
+
+ /**
+ * Check if the target thread has completed shutdown.
+ *
+ * May only be accessed on the thread which called `nsIThread::beginShutdown`.
+ */
+ [infallible] readonly attribute boolean completed;
+
+ /**
+   * Give up on waiting for the shutting-down thread to exit. Calling this
+   * method allows the thread to continue running; it will no longer block
+   * shutdown, and it will never be joined or have its resources reclaimed.
+ *
+ * Completion callbacks attached to this `nsIThreadShutdown` may be executed
+ * during this call.
+ *
+ * This method should NOT be called except in exceptional circumstances during
+ * shutdown, as it will cause resources for the shutting down thread to be
+ * leaked.
+ *
+ * May only be called on the thread which called `nsIThread::beginShutdown`
+ *
+ * @throws NS_ERROR_NOT_AVAILABLE
+ * Indicates that the target thread has already stopped running and a
+ * request to be joined is already being dispatched to the waiting thread.
+ */
+ void stopWaitingAndLeakThread();
+};
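+
+// A usage sketch (illustrative, assuming NS_NewRunnableFunction from
+// nsThreadUtils.h): observe a thread's async shutdown from the owning
+// thread.
+//
+// nsCOMPtr<nsIThreadShutdown> shutdown;
+// if (NS_SUCCEEDED(aThread->BeginShutdown(getter_AddRefs(shutdown)))) {
+//   nsCOMPtr<nsIRunnable> done = NS_NewRunnableFunction(
+//       "Example::ShutdownDone", [] { /* thread has exited */ });
+//   shutdown->OnCompletion(done);
+// }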
diff --git a/xpcom/threads/nsITimer.idl b/xpcom/threads/nsITimer.idl
new file mode 100644
index 0000000000..5d20c315b4
--- /dev/null
+++ b/xpcom/threads/nsITimer.idl
@@ -0,0 +1,376 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsINamed.idl"
+
+interface nsIObserver;
+interface nsIEventTarget;
+
+%{C++
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/TimeStamp.h"
+#include <functional>
+
+/**
+ * The signature of the timer callback function passed to initWithFuncCallback.
+ * This is the function that will get called when the timer expires if the
+ * timer is initialized via initWithFuncCallback.
+ *
+ * @param aTimer the timer which has expired
+ * @param aClosure opaque parameter passed to initWithFuncCallback
+ */
+class nsITimer;
+typedef void (*nsTimerCallbackFunc) (nsITimer *aTimer, void *aClosure);
+%}
+
+native MallocSizeOf(mozilla::MallocSizeOf);
+native nsTimerCallbackFunc(nsTimerCallbackFunc);
+[ref] native TimeDuration(mozilla::TimeDuration);
+
+/**
+ * The callback interface for timers.
+ */
+interface nsITimer;
+
+[function, scriptable, uuid(a796816d-7d47-4348-9ab8-c7aeb3216a7d)]
+interface nsITimerCallback : nsISupports
+{
+ /**
+ * @param aTimer the timer which has expired
+ */
+ void notify(in nsITimer timer);
+};
+
+%{C++
+// Two timer deadlines must differ by less than half the PRIntervalTime domain.
+#define DELAY_INTERVAL_LIMIT PR_BIT(8 * sizeof(PRIntervalTime) - 1)
+%}
+
+/**
+ * nsITimer instances must be initialized by calling one of the "init" methods
+ * documented below. You may also re-initialize (using one of the init()
+ * methods) an existing instance to avoid the overhead of destroying and
+ * creating a timer. It is not necessary to cancel the timer in that case.
+ *
+ * By default a timer will fire on the thread that created it. Set the .target
+ * attribute to fire on a different thread. Once you have set a timer's .target
+ * and called one of its init functions, any further interactions with the timer
+ * (calling cancel(), changing member fields, etc) should only be done by the
+ * target thread, or races may occur with bad results like timers firing after
+ * they've been canceled, and/or not firing after re-initialization.
+ */
+[scriptable, builtinclass, uuid(3de4b105-363c-482c-a409-baac83a01bfc)]
+interface nsITimer : nsISupports
+{
+ /* Timer types */
+
+ /**
+ * Type of a timer that fires once only.
+ */
+ const short TYPE_ONE_SHOT = 0;
+
+ /**
+ * After firing, a TYPE_REPEATING_SLACK timer is stopped and not restarted
+   * until its callback completes. The specified timer period is therefore a
+   * minimum: at least that much time elapses between the completion of one
+   * callback and the next firing.
+ *
+ * This is the preferable repeating type for most situations.
+ */
+ const short TYPE_REPEATING_SLACK = 1;
+
+ /**
+ * TYPE_REPEATING_PRECISE is just a synonym for
+ * TYPE_REPEATING_PRECISE_CAN_SKIP. They used to be distinct, but the old
+ * TYPE_REPEATING_PRECISE kind was similar to TYPE_REPEATING_PRECISE_CAN_SKIP
+ * while also being less useful. So the distinction was removed.
+ */
+ const short TYPE_REPEATING_PRECISE = 2;
+
+ /**
+ * A TYPE_REPEATING_PRECISE_CAN_SKIP repeating timer aims to have constant
+ * period between firings. The processing time for each timer callback will
+ * not influence the timer period. If the callback finishes after the next
+ * firing(s) should have happened (either because the callback took a long
+ * time, or the callback was called extremely late), that firing(s) is
+ * skipped, but the following sequence of firing times will not be altered.
+ * This timer type guarantees that it will not queue up new events to fire
+ * the callback until the previous callback event finishes firing. This is
+ * the only non-slack timer available.
+ */
+ const short TYPE_REPEATING_PRECISE_CAN_SKIP = 3;
+
+ /**
+ * Same as TYPE_REPEATING_SLACK with the exception that idle events
+ * won't yield to timers with this type. Use this when you want an
+ * idle callback to be scheduled to run even though this timer is
+ * about to fire.
+ */
+ const short TYPE_REPEATING_SLACK_LOW_PRIORITY = 4;
+
+ /**
+ * Same as TYPE_ONE_SHOT with the exception that idle events won't
+ * yield to timers with this type. Use this when you want an idle
+ * callback to be scheduled to run even though this timer is about
+ * to fire.
+ */
+ const short TYPE_ONE_SHOT_LOW_PRIORITY = 5;
+
+ /**
+   * Initialize a timer that will fire after the specified delay.
+   * The caller must keep a reference to this timer until it
+   * is no longer needed or has been cancelled.
+ *
+ * @param aObserver the callback object that observes the
+ * ``timer-callback'' topic with the subject being
+ * the timer itself when the timer fires:
+ *
+ * observe(nsISupports aSubject, => nsITimer
+ * string aTopic, => ``timer-callback''
+ * wstring data => null
+ *
+ * @param aDelayInMs delay in milliseconds for timer to fire
+ * @param aType timer type per TYPE* consts defined above
+ */
+ void init(in nsIObserver aObserver, in unsigned long aDelayInMs,
+ in unsigned long aType);
+
+
+ /**
+ * Initialize a timer to fire after the given millisecond interval.
+ * This version takes a callback object.
+ *
+   * @param aCallback nsITimerCallback interface to call when timer expires
+ * @param aDelayInMs The millisecond interval
+ * @param aType Timer type per TYPE* consts defined above
+ */
+ void initWithCallback(in nsITimerCallback aCallback,
+ in unsigned long aDelayInMs,
+ in unsigned long aType);
+
+ /**
+ * Initialize a timer to fire after the high resolution TimeDuration.
+ * This version takes a callback object.
+ *
+   * @param aCallback nsITimerCallback interface to call when timer expires
+ * @param aDelay The high resolution interval
+ * @param aType Timer type per TYPE* consts defined above
+ */
+ [noscript] void initHighResolutionWithCallback(in nsITimerCallback aCallback,
+ [const] in TimeDuration aDelay,
+ in unsigned long aType);
+
+ /**
+ * Cancel the timer. This method works on all types, not just on repeating
+ * timers -- you might want to cancel a TYPE_ONE_SHOT timer, and even reuse
+ * it by re-initializing it (to avoid object destruction and creation costs
+ * by conserving one timer instance).
+ */
+ void cancel();
+
+ /**
+   * Initialize a timer to fire after a millisecond interval, with a C
+   * function callback and a name for the timer; the name will be used when
+   * timer profiling is enabled via the "TimerFirings" log module.
+ *
+ * @param aFunc The function to invoke
+ * @param aClosure An opaque pointer to pass to that function
+ * @param aDelay The millisecond interval
+ * @param aType Timer type per TYPE* consts defined above
+ * @param aName The timer's name
+ */
+ [noscript] void initWithNamedFuncCallback(in nsTimerCallbackFunc aCallback,
+ in voidPtr aClosure,
+ in unsigned long aDelay,
+ in unsigned long aType,
+ in string aName);
+
+ /**
+ * Initialize a timer to fire after the high resolution TimeDuration.
+ * This version takes a named function callback.
+ *
+ * @param aFunc The function to invoke
+ * @param aClosure An opaque pointer to pass to that function
+ * @param aDelay The high resolution interval
+ * @param aType Timer type per TYPE* consts defined above
+ * @param aName The timer's name
+ */
+ [noscript] void initHighResolutionWithNamedFuncCallback(
+ in nsTimerCallbackFunc aCallback,
+ in voidPtr aClosure,
+ [const] in TimeDuration aDelay,
+ in unsigned long aType,
+ in string aName);
+
+ /**
+ * The millisecond delay of the timeout.
+ *
+ * NOTE: Re-setting the delay on a one-shot timer that has already fired
+ * doesn't restart the timer. Call one of the init() methods to restart
+ * a one-shot timer.
+ */
+ attribute unsigned long delay;
+
+ /**
+ * The timer type - one of the above TYPE_* constants.
+ */
+ attribute unsigned long type;
+
+ /**
+   * The opaque pointer passed to initWithNamedFuncCallback.
+ */
+ [noscript] readonly attribute voidPtr closure;
+
+ /**
+ * The nsITimerCallback object passed to initWithCallback.
+ */
+ readonly attribute nsITimerCallback callback;
+
+ /**
+ * The nsIEventTarget where the callback will be dispatched. Note that this
+ * target may only be set before the call to one of the init methods above.
+ *
+ * By default the target is the thread that created the timer.
+ */
+ attribute nsIEventTarget target;
+
+ readonly attribute ACString name;
+
+ /**
+ * The number of microseconds this nsITimer implementation can possibly
+ * fire early.
+ */
+ [noscript] readonly attribute unsigned long allowedEarlyFiringMicroseconds;
+
+ [notxpcom, nostdcall] size_t sizeOfIncludingThis(in MallocSizeOf aMallocSizeOf);
+};
+
+%{C++
+#include "nsCOMPtr.h"
+
+already_AddRefed<nsITimer> NS_NewTimer();
+
+already_AddRefed<nsITimer> NS_NewTimer(nsIEventTarget* aTarget);
+
+nsresult
+NS_NewTimerWithObserver(nsITimer** aTimer,
+ nsIObserver* aObserver,
+ uint32_t aDelay,
+ uint32_t aType,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithObserver(nsIObserver* aObserver,
+ uint32_t aDelay,
+ uint32_t aType,
+ nsIEventTarget* aTarget = nullptr);
+
+nsresult
+NS_NewTimerWithCallback(nsITimer** aTimer,
+ nsITimerCallback* aCallback,
+ uint32_t aDelay,
+ uint32_t aType,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithCallback(nsITimerCallback* aCallback,
+ uint32_t aDelay,
+ uint32_t aType,
+ nsIEventTarget* aTarget = nullptr);
+
+nsresult
+NS_NewTimerWithCallback(nsITimer** aTimer,
+ nsITimerCallback* aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithCallback(nsITimerCallback* aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType,
+ nsIEventTarget* aTarget = nullptr);
+
+nsresult
+NS_NewTimerWithCallback(nsITimer** aTimer,
+ std::function<void(nsITimer*)>&& aCallback,
+ uint32_t aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithCallback(std::function<void(nsITimer*)>&& aCallback,
+ uint32_t aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+
+nsresult
+NS_NewTimerWithCallback(nsITimer** aTimer,
+ std::function<void(nsITimer*)>&& aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithCallback(std::function<void(nsITimer*)>&& aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+
+nsresult
+NS_NewTimerWithFuncCallback(nsITimer** aTimer,
+ nsTimerCallbackFunc aCallback,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithFuncCallback(nsTimerCallbackFunc aCallback,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+
+nsresult
+NS_NewTimerWithFuncCallback(nsITimer** aTimer,
+ nsTimerCallbackFunc aCallback,
+ void* aClosure,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult>
+NS_NewTimerWithFuncCallback(nsTimerCallbackFunc aCallback,
+ void* aClosure,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget = nullptr);
+
+#define NS_TIMER_CALLBACK_TOPIC "timer-callback"
+
+#ifndef RELEASE_OR_BETA
+#undef NS_DECL_NSITIMERCALLBACK
+#define NS_DECL_NSITIMERCALLBACK \
+ NS_IMETHOD Notify(nsITimer *timer) override; \
+ inline void _ensure_GetName_exists(void) { \
+ static_assert(std::is_convertible<decltype(this), nsINamed*>::value, \
+ "nsITimerCallback implementations must also implement nsINamed"); \
+ }
+#endif
+%}
+
+[scriptable, builtinclass, uuid(5482506d-1d21-4d08-b01c-95c87e1295ad)]
+interface nsITimerManager : nsISupports
+{
+ /**
+ * Returns a read-only list of nsITimer objects, implementing only the name,
+ * delay and type attribute getters.
+ * This is meant to be used for tests, to verify that no timer is leftover
+ * at the end of a test. */
+ Array<nsITimer> getTimers();
+};
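+
+// A usage sketch of the std::function variant declared above (illustrative;
+// the timer name is hypothetical): a named, one-shot 1s timer firing on the
+// creating thread.
+//
+// nsCOMPtr<nsITimer> timer;
+// nsresult rv = NS_NewTimerWithCallback(
+//     getter_AddRefs(timer),
+//     [](nsITimer*) { /* fires on the creating thread */ },
+//     1000, nsITimer::TYPE_ONE_SHOT, "ExampleTimer");
+// // Keep |timer| alive until it fires or is cancelled.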
diff --git a/xpcom/threads/nsMemoryPressure.cpp b/xpcom/threads/nsMemoryPressure.cpp
new file mode 100644
index 0000000000..dbd3a92f79
--- /dev/null
+++ b/xpcom/threads/nsMemoryPressure.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsMemoryPressure.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Services.h"
+
+#include "nsThreadUtils.h"
+#include "nsIObserverService.h"
+
+using namespace mozilla;
+
+const char* const kTopicMemoryPressure = "memory-pressure";
+const char* const kTopicMemoryPressureStop = "memory-pressure-stop";
+const char16_t* const kSubTopicLowMemoryNew = u"low-memory";
+const char16_t* const kSubTopicLowMemoryOngoing = u"low-memory-ongoing";
+
+// This is accessed from any thread through NS_NotifyOfEventualMemoryPressure
+static Atomic<MemoryPressureState, Relaxed> sMemoryPressurePending(
+ MemoryPressureState::NoPressure);
+
+void NS_NotifyOfEventualMemoryPressure(MemoryPressureState aState) {
+ MOZ_ASSERT(aState != MemoryPressureState::None);
+
+ /*
+ * A new memory pressure event erases an ongoing (or stop of) memory pressure,
+ * but an existing "new" memory pressure event takes precedence over a new
+ * "ongoing" or "stop" memory pressure event.
+ */
+ switch (aState) {
+ case MemoryPressureState::None:
+ case MemoryPressureState::LowMemory:
+ sMemoryPressurePending = aState;
+ break;
+ case MemoryPressureState::NoPressure:
+ sMemoryPressurePending.compareExchange(MemoryPressureState::None, aState);
+ break;
+ }
+}
+
+nsresult NS_NotifyOfMemoryPressure(MemoryPressureState aState) {
+ NS_NotifyOfEventualMemoryPressure(aState);
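+  // The runnable below is a no-op: dispatching it merely wakes the main
+  // thread so the state recorded above is processed promptly (see
+  // nsMemoryPressure.h).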
+ nsCOMPtr<nsIRunnable> event =
+ new Runnable("NS_DispatchEventualMemoryPressure");
+ return NS_DispatchToMainThread(event);
+}
+
+void NS_DispatchMemoryPressure() {
+ MOZ_ASSERT(NS_IsMainThread());
+ static MemoryPressureState sMemoryPressureStatus =
+ MemoryPressureState::NoPressure;
+
+ MemoryPressureState mpPending =
+ sMemoryPressurePending.exchange(MemoryPressureState::None);
+ if (mpPending == MemoryPressureState::None) {
+ return;
+ }
+
+ nsCOMPtr<nsIObserverService> os = services::GetObserverService();
+ if (!os) {
+ NS_WARNING("Can't get observer service!");
+ return;
+ }
+
+ switch (mpPending) {
+ case MemoryPressureState::None:
+ MOZ_ASSERT_UNREACHABLE("Already handled this case above.");
+ break;
+ case MemoryPressureState::LowMemory:
+ switch (sMemoryPressureStatus) {
+ case MemoryPressureState::None:
+ MOZ_ASSERT_UNREACHABLE("The internal status should never be None.");
+ break;
+ case MemoryPressureState::NoPressure:
+ sMemoryPressureStatus = MemoryPressureState::LowMemory;
+ os->NotifyObservers(nullptr, kTopicMemoryPressure,
+ kSubTopicLowMemoryNew);
+ break;
+ case MemoryPressureState::LowMemory:
+ os->NotifyObservers(nullptr, kTopicMemoryPressure,
+ kSubTopicLowMemoryOngoing);
+ break;
+ }
+ break;
+ case MemoryPressureState::NoPressure:
+ switch (sMemoryPressureStatus) {
+ case MemoryPressureState::None:
+ MOZ_ASSERT_UNREACHABLE("The internal status should never be None.");
+ break;
+ case MemoryPressureState::NoPressure:
+ // Already no pressure. Do nothing.
+ break;
+ case MemoryPressureState::LowMemory:
+ sMemoryPressureStatus = MemoryPressureState::NoPressure;
+ os->NotifyObservers(nullptr, kTopicMemoryPressureStop, nullptr);
+ break;
+ }
+ break;
+ }
+}
diff --git a/xpcom/threads/nsMemoryPressure.h b/xpcom/threads/nsMemoryPressure.h
new file mode 100644
index 0000000000..5a68b0bce5
--- /dev/null
+++ b/xpcom/threads/nsMemoryPressure.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsMemoryPressure_h__
+#define nsMemoryPressure_h__
+
+#include "nscore.h"
+
+/*
+ * These pre-defined strings are the topic to pass to the observer
+ * service to declare the memory-pressure or lift the memory-pressure.
+ *
+ * 1. Notify kTopicMemoryPressure with kSubTopicLowMemoryNew
+ *    New memory pressure detected.
+ *    On a new memory pressure, we stop everything and start cleaning
+ *    memory aggressively, in order to free as much memory as possible.
+ *
+ * 2. Notify kTopicMemoryPressure with kSubTopicLowMemoryOngoing
+ *    Repeated memory pressure.
+ *    A repeated memory pressure implies a softer cleanup of recent
+ *    allocations. It is supposed to happen after a new memory pressure,
+ *    which has already cleaned up aggressively, so there is no need to
+ *    damage the reactivity of Gecko by stopping the world again.
+ *
+ *    In case of conflict with a new memory pressure, the new memory
+ *    pressure takes precedence over an ongoing one. The reason is that
+ *    if no events are processed between two notifications (new followed
+ *    by ongoing, or ongoing followed by new), we want to be as
+ *    aggressive as possible in cleaning up memory. After all, we are
+ *    trying to keep Gecko alive as long as possible.
+ *
+ * 3. Notify kTopicMemoryPressureStop with nullptr
+ * Memory pressure stopped.
+ * We're no longer under acute memory pressure, so we might want to have a
+ * chance of (cautiously) re-enabling some things we previously turned off.
+ * As above, an already enqueued new memory pressure event takes precedence.
+ * The priority ordering between concurrent attempts to queue both stopped
+ * and ongoing memory pressure is currently not defined.
+ */
+extern const char* const kTopicMemoryPressure;
+extern const char* const kTopicMemoryPressureStop;
+extern const char16_t* const kSubTopicLowMemoryNew;
+extern const char16_t* const kSubTopicLowMemoryOngoing;
+
+enum class MemoryPressureState : uint32_t {
+ None, // For internal use. Don't use this value.
+ LowMemory,
+ NoPressure,
+};
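+
+/*
+ * Illustrative sketch (not part of this header): an observer registered for
+ * kTopicMemoryPressure can tell the two sub-topics apart in its Observe()
+ * implementation. `MyObserver` is a hypothetical class.
+ *
+ *   NS_IMETHODIMP
+ *   MyObserver::Observe(nsISupports*, const char* aTopic,
+ *                       const char16_t* aData) {
+ *     if (!strcmp(aTopic, kTopicMemoryPressure)) {
+ *       if (nsDependentString(aData).Equals(kSubTopicLowMemoryNew)) {
+ *         // New pressure: free as much memory as possible.
+ *       } else {
+ *         // Ongoing pressure: softly clean up recent allocations.
+ *       }
+ *     }
+ *     return NS_OK;
+ *   }
+ */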
+
+/**
+ * This function causes the main thread to fire a memory pressure event
+ * before processing the next event. However, if there are no events pending
+ * in the main thread's event queue, the memory pressure event will not be
+ * dispatched until one is enqueued. It is infallible and does not allocate
+ * any memory.
+ *
+ * You may call this function from any thread.
+ */
+void NS_NotifyOfEventualMemoryPressure(MemoryPressureState aState);
+
+/**
+ * This function causes the main thread to fire a memory pressure event
+ * before processing the next event. We wake up the main thread by adding a
+ * dummy event to its event loop, so, unlike with
+ * NS_NotifyOfEventualMemoryPressure, this memory-pressure event is always
+ * fired relatively quickly, even if the event loop is otherwise empty.
+ *
+ * You may call this function from any thread.
+ */
+nsresult NS_NotifyOfMemoryPressure(MemoryPressureState aState);
+
+#endif // nsMemoryPressure_h__
diff --git a/xpcom/threads/nsProcess.h b/xpcom/threads/nsProcess.h
new file mode 100644
index 0000000000..95d6748640
--- /dev/null
+++ b/xpcom/threads/nsProcess.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsPROCESSWIN_H_
+#define _nsPROCESSWIN_H_
+
+#if defined(XP_WIN)
+# define PROCESSMODEL_WINAPI
+#endif
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+#include "nsIProcess.h"
+#include "nsIObserver.h"
+#include "nsMaybeWeakPtr.h"
+#include "nsString.h"
+#ifndef XP_UNIX
+# include "prproces.h"
+#endif
+#if defined(PROCESSMODEL_WINAPI)
+# include <windows.h>
+# include <shellapi.h>
+#endif
+
+#define NS_PROCESS_CID \
+ { \
+ 0x7b4eeb20, 0xd781, 0x11d4, { \
+ 0x8A, 0x83, 0x00, 0x10, 0xa4, 0xe0, 0xc9, 0xca \
+ } \
+ }
+
+class nsIFile;
+
+class nsProcess final : public nsIProcess, public nsIObserver {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIPROCESS
+ NS_DECL_NSIOBSERVER
+
+ nsProcess();
+
+ private:
+ ~nsProcess();
+ PRThread* CreateMonitorThread();
+ static void Monitor(void* aArg);
+ void ProcessComplete();
+ nsresult CopyArgsAndRunProcess(bool aBlocking, const char** aArgs,
+ uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak);
+ nsresult CopyArgsAndRunProcessw(bool aBlocking, const char16_t** aArgs,
+ uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak);
+ // The 'args' array is null-terminated.
+ nsresult RunProcess(bool aBlocking, char** aArgs, nsIObserver* aObserver,
+ bool aHoldWeak, bool aArgsUTF8);
+
+ PRThread* mThread;
+ mozilla::Mutex mLock;
+ bool mShutdown MOZ_GUARDED_BY(mLock);
+ bool mBlocking;
+ bool mStartHidden;
+ bool mNoShell;
+
+ nsCOMPtr<nsIFile> mExecutable;
+ nsString mTargetPath;
+ int32_t mPid;
+ nsMaybeWeakPtr<nsIObserver> mObserver;
+
+ // These members are modified by multiple threads, any accesses should be
+ // protected with mLock.
+ int32_t mExitValue MOZ_GUARDED_BY(mLock);
+#if defined(PROCESSMODEL_WINAPI)
+ HANDLE mProcess MOZ_GUARDED_BY(mLock);
+#elif !defined(XP_UNIX)
+ PRProcess* mProcess MOZ_GUARDED_BY(mLock);
+#endif
+};
+
+#endif
diff --git a/xpcom/threads/nsProcessCommon.cpp b/xpcom/threads/nsProcessCommon.cpp
new file mode 100644
index 0000000000..0a88488e5f
--- /dev/null
+++ b/xpcom/threads/nsProcessCommon.cpp
@@ -0,0 +1,600 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*****************************************************************************
+ *
+ * nsProcess is used to execute new processes and specify if you want to
+ * wait (blocking) or continue (non-blocking).
+ *
+ *****************************************************************************
+ */
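+
+// Illustrative usage sketch (assuming the usual "@mozilla.org/process/util;1"
+// contract ID; error handling omitted):
+//
+//   nsCOMPtr<nsIProcess> process =
+//       do_CreateInstance("@mozilla.org/process/util;1");
+//   process->Init(executableFile);    // nsIFile* pointing at the binary
+//   const char* args[] = {"--version"};
+//   process->Run(false, args, 1);     // false => non-blocking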
+
+#include "mozilla/ArrayUtils.h"
+
+#include "nsCOMPtr.h"
+#include "nsIFile.h"
+#include "nsProcess.h"
+#include "prio.h"
+#include "prenv.h"
+#include "nsCRT.h"
+#include "nsThreadUtils.h"
+#include "nsIObserverService.h"
+#include "nsXULAppAPI.h"
+#include "mozilla/Services.h"
+
+#include <stdlib.h>
+
+#if defined(PROCESSMODEL_WINAPI)
+# include "nsString.h"
+# include "nsLiteralString.h"
+# include "nsReadableUtils.h"
+# include "mozilla/AssembleCmdLine.h"
+# include "mozilla/UniquePtrExtensions.h"
+#else
+# ifdef XP_MACOSX
+# include <crt_externs.h>
+# include <spawn.h>
+# endif
+# ifdef XP_UNIX
+# ifndef XP_MACOSX
+# include "base/process_util.h"
+# endif
+# include <sys/wait.h>
+# include <sys/errno.h>
+# endif
+# include <sys/types.h>
+# include <signal.h>
+#endif
+
+using namespace mozilla;
+
+//-------------------------------------------------------------------//
+// nsIProcess implementation
+//-------------------------------------------------------------------//
+NS_IMPL_ISUPPORTS(nsProcess, nsIProcess, nsIObserver)
+
+// Constructor
+nsProcess::nsProcess()
+ : mThread(nullptr),
+ mLock("nsProcess.mLock"),
+ mShutdown(false),
+ mBlocking(false),
+ mStartHidden(false),
+ mNoShell(false),
+ mPid(-1),
+ mExitValue(-1)
+#if !defined(XP_UNIX)
+ ,
+ mProcess(nullptr)
+#endif
+{
+}
+
+// Destructor
+nsProcess::~nsProcess() = default;
+
+NS_IMETHODIMP
+nsProcess::Init(nsIFile* aExecutable) {
+ if (mExecutable) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ if (NS_WARN_IF(!aExecutable)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+ bool isFile;
+
+ // First make sure the file exists
+ nsresult rv = aExecutable->IsFile(&isFile);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (!isFile) {
+ return NS_ERROR_FAILURE;
+ }
+
+ // Store the nsIFile in mExecutable
+ mExecutable = aExecutable;
+ // Get the path because it is needed by the NSPR process creation
+#ifdef XP_WIN
+ rv = mExecutable->GetTarget(mTargetPath);
+ if (NS_FAILED(rv) || mTargetPath.IsEmpty())
+#endif
+ rv = mExecutable->GetPath(mTargetPath);
+
+ return rv;
+}
+
+void nsProcess::Monitor(void* aArg) {
+ RefPtr<nsProcess> process = dont_AddRef(static_cast<nsProcess*>(aArg));
+
+ if (!process->mBlocking) {
+ NS_SetCurrentThreadName("RunProcess");
+ }
+
+#if defined(PROCESSMODEL_WINAPI)
+ HANDLE processHandle;
+ {
+ // The mutex region cannot include WaitForSingleObject otherwise we'll
+ // block calls such as Kill. So lock on access and store a local.
+ MutexAutoLock lock(process->mLock);
+ processHandle = process->mProcess;
+ }
+
+ DWORD dwRetVal;
+ unsigned long exitCode = -1;
+
+ dwRetVal = WaitForSingleObject(processHandle, INFINITE);
+ if (dwRetVal != WAIT_FAILED) {
+ if (GetExitCodeProcess(processHandle, &exitCode) == FALSE) {
+ exitCode = -1;
+ }
+ }
+
+ // Lock in case Kill or GetExitCode are called during this.
+ {
+ MutexAutoLock lock(process->mLock);
+ CloseHandle(process->mProcess);
+ process->mProcess = nullptr;
+ process->mExitValue = exitCode;
+ if (process->mShutdown) {
+ return;
+ }
+ }
+#else
+# ifdef XP_UNIX
+ int exitCode = -1;
+ int status = 0;
+ pid_t result;
+ do {
+ result = waitpid(process->mPid, &status, 0);
+ } while (result == -1 && errno == EINTR);
+ if (result == process->mPid) {
+ if (WIFEXITED(status)) {
+ exitCode = WEXITSTATUS(status);
+ } else if (WIFSIGNALED(status)) {
+ exitCode = 256; // match NSPR's signal exit status
+ }
+ }
+# else
+ int32_t exitCode = -1;
+ PRProcess* prProcess;
+ {
+ // The mutex region cannot include PR_WaitProcess otherwise we'll
+ // block calls such as Kill. So lock on access and store a local.
+ MutexAutoLock lock(process->mLock);
+ prProcess = process->mProcess;
+ }
+ if (PR_WaitProcess(prProcess, &exitCode) != PR_SUCCESS) {
+ exitCode = -1;
+ }
+# endif
+
+ // Lock in case Kill or GetExitCode are called during this
+ {
+ MutexAutoLock lock(process->mLock);
+# if !defined(XP_UNIX)
+ process->mProcess = nullptr;
+# endif
+ process->mExitValue = exitCode;
+ if (process->mShutdown) {
+ return;
+ }
+ }
+#endif
+
+ // If we ran a background thread for the monitor then notify on the main
+ // thread
+ if (NS_IsMainThread()) {
+ process->ProcessComplete();
+ } else {
+ NS_DispatchToMainThread(NewRunnableMethod(
+ "nsProcess::ProcessComplete", process, &nsProcess::ProcessComplete));
+ }
+}
+
+void nsProcess::ProcessComplete() {
+ if (mThread) {
+ nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
+ if (os) {
+ os->RemoveObserver(this, "xpcom-shutdown");
+ }
+ PR_JoinThread(mThread);
+ mThread = nullptr;
+ }
+
+ const char* topic;
+ {
+ MutexAutoLock lock(mLock);
+ if (mExitValue != 0) {
+ topic = "process-failed";
+ } else {
+ topic = "process-finished";
+ }
+ }
+
+ mPid = -1;
+ nsCOMPtr<nsIObserver> observer = mObserver.GetValue();
+ mObserver = nullptr;
+
+ if (observer) {
+ observer->Observe(NS_ISUPPORTS_CAST(nsIProcess*, this), topic, nullptr);
+ }
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::Run(bool aBlocking, const char** aArgs, uint32_t aCount) {
+ return CopyArgsAndRunProcess(aBlocking, aArgs, aCount, nullptr, false);
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::RunAsync(const char** aArgs, uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak) {
+ return CopyArgsAndRunProcess(false, aArgs, aCount, aObserver, aHoldWeak);
+}
+
+nsresult nsProcess::CopyArgsAndRunProcess(bool aBlocking, const char** aArgs,
+ uint32_t aCount,
+ nsIObserver* aObserver,
+ bool aHoldWeak) {
+ // Add one to the aCount for the program name and one for null termination.
+  char** my_argv = (char**)moz_xmalloc(sizeof(char*) * (aCount + 2));
+
+ my_argv[0] = ToNewUTF8String(mTargetPath);
+
+ for (uint32_t i = 0; i < aCount; ++i) {
+ my_argv[i + 1] = const_cast<char*>(aArgs[i]);
+ }
+
+ my_argv[aCount + 1] = nullptr;
+
+ nsresult rv = RunProcess(aBlocking, my_argv, aObserver, aHoldWeak, false);
+
+ free(my_argv[0]);
+ free(my_argv);
+ return rv;
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::Runw(bool aBlocking, const char16_t** aArgs, uint32_t aCount) {
+ return CopyArgsAndRunProcessw(aBlocking, aArgs, aCount, nullptr, false);
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::RunwAsync(const char16_t** aArgs, uint32_t aCount,
+ nsIObserver* aObserver, bool aHoldWeak) {
+ return CopyArgsAndRunProcessw(false, aArgs, aCount, aObserver, aHoldWeak);
+}
+
+nsresult nsProcess::CopyArgsAndRunProcessw(bool aBlocking,
+ const char16_t** aArgs,
+ uint32_t aCount,
+ nsIObserver* aObserver,
+ bool aHoldWeak) {
+ // Add one to the aCount for the program name and one for null termination.
+  char** my_argv = (char**)moz_xmalloc(sizeof(char*) * (aCount + 2));
+
+ my_argv[0] = ToNewUTF8String(mTargetPath);
+
+ for (uint32_t i = 0; i < aCount; i++) {
+ my_argv[i + 1] = ToNewUTF8String(nsDependentString(aArgs[i]));
+ }
+
+ my_argv[aCount + 1] = nullptr;
+
+ nsresult rv = RunProcess(aBlocking, my_argv, aObserver, aHoldWeak, true);
+
+ for (uint32_t i = 0; i <= aCount; ++i) {
+ free(my_argv[i]);
+ }
+ free(my_argv);
+ return rv;
+}
+
+nsresult nsProcess::RunProcess(bool aBlocking, char** aMyArgv,
+ nsIObserver* aObserver, bool aHoldWeak,
+ bool aArgsUTF8) {
+ NS_WARNING_ASSERTION(!XRE_IsContentProcess(),
+ "No launching of new processes in the content process");
+
+ if (NS_WARN_IF(!mExecutable)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ if (NS_WARN_IF(mThread)) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ if (aObserver) {
+ if (aHoldWeak) {
+ nsresult rv = NS_OK;
+ mObserver = do_GetWeakReference(aObserver, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+ mObserver = aObserver;
+ }
+ }
+
+ {
+ MutexAutoLock lock(mLock);
+ mExitValue = -1;
+ mPid = -1;
+ }
+
+#if defined(PROCESSMODEL_WINAPI)
+ BOOL retVal;
+ UniqueFreePtr<wchar_t> cmdLine;
+
+ // |aMyArgv| is null-terminated and always starts with the program path. If
+ // the second slot is non-null then arguments are being passed.
+ if (aMyArgv[1] || mNoShell) {
+ // Pass the executable path as argv[0] to the launched program when calling
+ // CreateProcess().
+ char** argv = mNoShell ? aMyArgv : aMyArgv + 1;
+
+ wchar_t* assembledCmdLine = nullptr;
+ if (assembleCmdLine(argv, &assembledCmdLine,
+ aArgsUTF8 ? CP_UTF8 : CP_ACP) == -1) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+ cmdLine.reset(assembledCmdLine);
+ }
+
+ // The program name in aMyArgv[0] is always UTF-8
+ NS_ConvertUTF8toUTF16 wideFile(aMyArgv[0]);
+
+ if (mNoShell) {
+ STARTUPINFO startupInfo;
+ ZeroMemory(&startupInfo, sizeof(startupInfo));
+ startupInfo.cb = sizeof(startupInfo);
+ startupInfo.dwFlags = STARTF_USESHOWWINDOW;
+ startupInfo.wShowWindow = mStartHidden ? SW_HIDE : SW_SHOWNORMAL;
+
+ PROCESS_INFORMATION processInfo;
+ retVal = CreateProcess(/* lpApplicationName = */ wideFile.get(),
+ /* lpCommandLine */ cmdLine.get(),
+ /* lpProcessAttributes = */ NULL,
+ /* lpThreadAttributes = */ NULL,
+ /* bInheritHandles = */ FALSE,
+ /* dwCreationFlags = */ 0,
+ /* lpEnvironment = */ NULL,
+ /* lpCurrentDirectory = */ NULL,
+ /* lpStartupInfo = */ &startupInfo,
+ /* lpProcessInformation */ &processInfo);
+
+ if (!retVal) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+
+ CloseHandle(processInfo.hThread);
+
+ // TODO(bug 1763051): assess if we need further work around this locking.
+ MutexAutoLock lock(mLock);
+ mProcess = processInfo.hProcess;
+ } else {
+ SHELLEXECUTEINFOW sinfo;
+ memset(&sinfo, 0, sizeof(SHELLEXECUTEINFOW));
+ sinfo.cbSize = sizeof(SHELLEXECUTEINFOW);
+ sinfo.hwnd = nullptr;
+ sinfo.lpFile = wideFile.get();
+ sinfo.nShow = mStartHidden ? SW_HIDE : SW_SHOWNORMAL;
+
+ /* The SEE_MASK_NO_CONSOLE flag is important to prevent console windows
+ * from appearing. This makes behavior the same on all platforms. The flag
+ * will not have any effect on non-console applications.
+ */
+ sinfo.fMask =
+ SEE_MASK_FLAG_DDEWAIT | SEE_MASK_NO_CONSOLE | SEE_MASK_NOCLOSEPROCESS;
+
+ if (cmdLine) {
+ sinfo.lpParameters = cmdLine.get();
+ }
+
+ retVal = ShellExecuteExW(&sinfo);
+ if (!retVal) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+
+ MutexAutoLock lock(mLock);
+ mProcess = sinfo.hProcess;
+ }
+
+ {
+ MutexAutoLock lock(mLock);
+ mPid = GetProcessId(mProcess);
+ }
+#elif defined(XP_MACOSX)
+ // Note: |aMyArgv| is already null-terminated as required by posix_spawnp.
+ pid_t newPid = 0;
+ int result = posix_spawnp(&newPid, aMyArgv[0], nullptr, nullptr, aMyArgv,
+ *_NSGetEnviron());
+ mPid = static_cast<int32_t>(newPid);
+
+ if (result != 0) {
+ return NS_ERROR_FAILURE;
+ }
+#elif defined(XP_UNIX)
+ base::LaunchOptions options;
+ std::vector<std::string> argvVec;
+ for (char** arg = aMyArgv; *arg != nullptr; ++arg) {
+ argvVec.push_back(*arg);
+ }
+ pid_t newPid;
+ if (base::LaunchApp(argvVec, options, &newPid).isOk()) {
+ static_assert(sizeof(pid_t) <= sizeof(int32_t),
+ "mPid is large enough to hold a pid");
+ mPid = static_cast<int32_t>(newPid);
+ } else {
+ return NS_ERROR_FAILURE;
+ }
+#else
+ {
+ PRProcess* prProcess =
+ PR_CreateProcess(aMyArgv[0], aMyArgv, nullptr, nullptr);
+ if (!prProcess) {
+ return NS_ERROR_FAILURE;
+ }
+ {
+ MutexAutoLock lock(mLock);
+ mProcess = prProcess;
+ }
+ struct MYProcess {
+ uint32_t pid;
+ };
+ MYProcess* ptrProc = (MYProcess*)mProcess;
+ mPid = ptrProc->pid;
+ }
+#endif
+
+ NS_ADDREF_THIS();
+ mBlocking = aBlocking;
+ if (aBlocking) {
+ Monitor(this);
+ MutexAutoLock lock(mLock);
+ if (mExitValue < 0) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+ } else {
+ mThread = CreateMonitorThread();
+ if (!mThread) {
+ NS_RELEASE_THIS();
+ return NS_ERROR_FAILURE;
+ }
+
+ // It isn't a failure if we just can't watch for shutdown
+ nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
+ if (os) {
+ os->AddObserver(this, "xpcom-shutdown", false);
+ }
+ }
+
+ return NS_OK;
+}
+
+// We don't guarantee that monitor threads are joined before Gecko exits, which
+// can cause TSAN to complain about thread leaks. We handle this with a TSAN
+// suppression, and route thread creation through this helper so that the
+// suppression is as narrowly-scoped as possible.
+PRThread* nsProcess::CreateMonitorThread() {
+ return PR_CreateThread(PR_SYSTEM_THREAD, Monitor, this, PR_PRIORITY_NORMAL,
+ PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
+}
+
+NS_IMETHODIMP
+nsProcess::GetIsRunning(bool* aIsRunning) {
+ if (mThread) {
+ *aIsRunning = true;
+ } else {
+ *aIsRunning = false;
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetStartHidden(bool* aStartHidden) {
+ *aStartHidden = mStartHidden;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::SetStartHidden(bool aStartHidden) {
+ mStartHidden = aStartHidden;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetNoShell(bool* aNoShell) {
+ *aNoShell = mNoShell;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::SetNoShell(bool aNoShell) {
+ mNoShell = aNoShell;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetPid(uint32_t* aPid) {
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+ if (mPid < 0) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ *aPid = mPid;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::Kill() {
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ {
+ MutexAutoLock lock(mLock);
+#if defined(PROCESSMODEL_WINAPI)
+ if (TerminateProcess(mProcess, 0) == 0) {
+ return NS_ERROR_FAILURE;
+ }
+#elif defined(XP_UNIX)
+ if (kill(mPid, SIGKILL) != 0) {
+ return NS_ERROR_FAILURE;
+ }
+#else
+ if (!mProcess || (PR_KillProcess(mProcess) != PR_SUCCESS)) {
+ return NS_ERROR_FAILURE;
+ }
+#endif
+ }
+
+ // We must null out mThread if we want IsRunning to return false immediately
+ // after this call.
+ nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
+ if (os) {
+ os->RemoveObserver(this, "xpcom-shutdown");
+ }
+ PR_JoinThread(mThread);
+ mThread = nullptr;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetExitValue(int32_t* aExitValue) {
+ MutexAutoLock lock(mLock);
+
+ *aExitValue = mExitValue;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aData) {
+ // Shutting down, drop all references
+ if (mThread) {
+ nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
+ if (os) {
+ os->RemoveObserver(this, "xpcom-shutdown");
+ }
+ mThread = nullptr;
+ }
+
+ mObserver = nullptr;
+
+ MutexAutoLock lock(mLock);
+ mShutdown = true;
+
+ return NS_OK;
+}
diff --git a/xpcom/threads/nsProxyRelease.cpp b/xpcom/threads/nsProxyRelease.cpp
new file mode 100644
index 0000000000..86ed18c8b5
--- /dev/null
+++ b/xpcom/threads/nsProxyRelease.cpp
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsProxyRelease.h"
+#include "nsThreadUtils.h"
+
+namespace detail {
+
+/* static */ nsresult ProxyReleaseChooser<true>::ProxyReleaseISupports(
+ const char* aName, nsIEventTarget* aTarget, nsISupports* aDoomed,
+ bool aAlwaysProxy) {
+ return ::detail::ProxyRelease<nsISupports>(
+ aName, aTarget, dont_AddRef(aDoomed), aAlwaysProxy);
+}
+
+} // namespace detail
+
+extern "C" {
+
+// This function uses C linkage because it's exposed to Rust to support the
+// `ThreadPtrHolder` wrapper in the `moz_task` crate.
+void NS_ProxyReleaseISupports(const char* aName, nsIEventTarget* aTarget,
+ nsISupports* aDoomed, bool aAlwaysProxy) {
+ NS_ProxyRelease(aName, aTarget, dont_AddRef(aDoomed), aAlwaysProxy);
+}
+
+} // extern "C"
diff --git a/xpcom/threads/nsProxyRelease.h b/xpcom/threads/nsProxyRelease.h
new file mode 100644
index 0000000000..00f69e3287
--- /dev/null
+++ b/xpcom/threads/nsProxyRelease.h
@@ -0,0 +1,390 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsProxyRelease_h__
+#define nsProxyRelease_h__
+
+#include <utility>
+
+#include "MainThreadUtils.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Unused.h"
+#include "nsCOMPtr.h"
+#include "nsIEventTarget.h"
+#include "nsISerialEventTarget.h"
+#include "nsIThread.h"
+#include "nsPrintfCString.h"
+#include "nsThreadUtils.h"
+
+#ifdef XPCOM_GLUE_AVOID_NSPR
+# error NS_ProxyRelease implementation depends on NSPR.
+#endif
+
+class nsIRunnable;
+
+namespace detail {
+
+template <typename T>
+class ProxyReleaseEvent : public mozilla::CancelableRunnable {
+ public:
+ ProxyReleaseEvent(const char* aName, already_AddRefed<T> aDoomed)
+ : CancelableRunnable(aName), mDoomed(aDoomed.take()) {}
+
+ NS_IMETHOD Run() override {
+ NS_IF_RELEASE(mDoomed);
+ return NS_OK;
+ }
+
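+  // Even when the runnable is cancelled we must still release the wrapped
+  // pointer (otherwise it would leak), so cancellation simply runs the
+  // release.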
+ nsresult Cancel() override { return Run(); }
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHOD GetName(nsACString& aName) override {
+ if (mName) {
+ aName.Append(nsPrintfCString("ProxyReleaseEvent for %s", mName));
+ } else {
+ aName.AssignLiteral("ProxyReleaseEvent");
+ }
+ return NS_OK;
+ }
+#endif
+
+ private:
+ T* MOZ_OWNING_REF mDoomed;
+};
+
+template <typename T>
+nsresult ProxyRelease(const char* aName, nsIEventTarget* aTarget,
+ already_AddRefed<T> aDoomed, bool aAlwaysProxy) {
+ // Auto-managing release of the pointer.
+ RefPtr<T> doomed = aDoomed;
+ nsresult rv;
+
+ if (!doomed || !aTarget) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ if (!aAlwaysProxy) {
+ bool onCurrentThread = false;
+ rv = aTarget->IsOnCurrentThread(&onCurrentThread);
+ if (NS_SUCCEEDED(rv) && onCurrentThread) {
+ return NS_OK;
+ }
+ }
+
+ nsCOMPtr<nsIRunnable> ev = new ProxyReleaseEvent<T>(aName, doomed.forget());
+
+ rv = aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ NS_WARNING(nsPrintfCString(
+ "failed to post proxy release event for %s, leaking!", aName)
+ .get());
+ // It is better to leak the aDoomed object than risk crashing as
+ // a result of deleting it on the wrong thread.
+ }
+ return rv;
+}
+
+template <bool nsISupportsBased>
+struct ProxyReleaseChooser {
+ template <typename T>
+ static nsresult ProxyRelease(const char* aName, nsIEventTarget* aTarget,
+ already_AddRefed<T> aDoomed, bool aAlwaysProxy) {
+ return ::detail::ProxyRelease(aName, aTarget, std::move(aDoomed),
+ aAlwaysProxy);
+ }
+};
+
+template <>
+struct ProxyReleaseChooser<true> {
+ // We need an intermediate step for handling classes with ambiguous
+ // inheritance to nsISupports.
+ template <typename T>
+ static nsresult ProxyRelease(const char* aName, nsIEventTarget* aTarget,
+ already_AddRefed<T> aDoomed, bool aAlwaysProxy) {
+ return ProxyReleaseISupports(aName, aTarget, ToSupports(aDoomed.take()),
+ aAlwaysProxy);
+ }
+
+ static nsresult ProxyReleaseISupports(const char* aName,
+ nsIEventTarget* aTarget,
+ nsISupports* aDoomed,
+ bool aAlwaysProxy);
+};
+
+} // namespace detail
+
+/**
+ * Ensures that the delete of a smart pointer occurs on the target thread.
+ * Note: The doomed object will be leaked if dispatch to the target thread
+ * fails, as releasing it on the current thread may be unsafe.
+ *
+ * @param aName
+ * the labelling name of the runnable involved in the releasing.
+ * @param aTarget
+ * the target thread where the doomed object should be released.
+ * @param aDoomed
+ * the doomed object; the object to be released on the target thread.
+ * @param aAlwaysProxy
+ * normally, if NS_ProxyRelease is called on the target thread, then the
+ * doomed object will be released directly. However, if this parameter is
+ * true, then an event will always be posted to the target thread for
+ * asynchronous release.
+ * @return result of the task which is dispatched to delete the smart pointer
+ * on the target thread.
+ * Note: The caller should not attempt to recover from an
+ * error code returned by trying to perform the final ->Release()
+ * manually.
+ */
+template <class T>
+inline NS_HIDDEN_(nsresult)
+ NS_ProxyRelease(const char* aName, nsIEventTarget* aTarget,
+ already_AddRefed<T> aDoomed, bool aAlwaysProxy = false) {
+ return ::detail::ProxyReleaseChooser<
+ std::is_base_of<nsISupports, T>::value>::ProxyRelease(aName, aTarget,
+ std::move(aDoomed),
+ aAlwaysProxy);
+}
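+
+// Illustrative sketch: releasing a reference on its owning thread from some
+// other thread (`nsIFoo`, `ownerThread`, and `mFoo` are hypothetical):
+//
+//   nsCOMPtr<nsIFoo> foo = ...;  // must be released on `ownerThread`
+//   // ...later, on a different thread...
+//   NS_ProxyRelease("MyClass::mFoo", ownerThread, foo.forget());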
+
+/**
+ * Ensures that the delete of a smart pointer occurs on the main thread.
+ *
+ * @param aName
+ * the labelling name of the runnable involved in the releasing
+ * @param aDoomed
+ * the doomed object; the object to be released on the main thread.
+ * @param aAlwaysProxy
+ * normally, if NS_ReleaseOnMainThread is called on the main
+ * thread, then the doomed object will be released directly. However, if
+ * this parameter is true, then an event will always be posted to the
+ * main thread for asynchronous release.
+ */
+template <class T>
+inline NS_HIDDEN_(void)
+ NS_ReleaseOnMainThread(const char* aName, already_AddRefed<T> aDoomed,
+ bool aAlwaysProxy = false) {
+ RefPtr<T> doomed = aDoomed;
+ if (!doomed) {
+ return; // Nothing to do.
+ }
+
+ // NS_ProxyRelease treats a null event target as "the current thread". So a
+ // handle on the main thread is only necessary when we're not already on the
+ // main thread or the release must happen asynchronously.
+ nsCOMPtr<nsIEventTarget> target;
+ if (!NS_IsMainThread() || aAlwaysProxy) {
+ target = mozilla::GetMainThreadSerialEventTarget();
+
+ if (!target) {
+ MOZ_ASSERT_UNREACHABLE("Could not get main thread; leaking an object!");
+ mozilla::Unused << doomed.forget().take();
+ return;
+ }
+ }
+
+ NS_ProxyRelease(aName, target, doomed.forget(), aAlwaysProxy);
+}
+
+/**
+ * Class to safely handle main-thread-only pointers off the main thread.
+ *
+ * Classes like XPCWrappedJS are main-thread-only, which means that it is
+ * forbidden to call methods on instances of these classes off the main thread.
+ * For various reasons (see bug 771074), this restriction applies to
+ * AddRef/Release as well.
+ *
+ * This presents a problem for consumers that wish to hold a callback alive
+ * on non-main-thread code. A common example of this is the proxy callback
+ * pattern, where non-main-thread code holds a strong-reference to the callback
+ * object, and dispatches new Runnables (also with a strong reference) to the
+ * main thread in order to execute the callback. This involves several AddRef
+ * and Release calls on the other thread, which is verboten.
+ *
+ * The basic idea of this class is to introduce a layer of indirection.
+ * nsMainThreadPtrHolder is a threadsafe reference-counted class that internally
+ * maintains one strong reference to the main-thread-only object. It must be
+ * instantiated on the main thread (so that the AddRef of the underlying object
+ * happens on the main thread), but consumers may subsequently pass references
+ * to the holder anywhere they please. These references are meant to be opaque
+ * when accessed off-main-thread (assertions enforce this).
+ *
+ * The semantics of RefPtr<nsMainThreadPtrHolder<T>> would be cumbersome, so we
+ * also introduce nsMainThreadPtrHandle<T>, which is conceptually identical to
+ * the above (though it includes various convenience methods). The basic pattern
+ * is as follows.
+ *
+ * // On the main thread:
+ * nsCOMPtr<nsIFooCallback> callback = ...;
+ * nsMainThreadPtrHandle<nsIFooCallback> callbackHandle =
+ * new nsMainThreadPtrHolder<nsIFooCallback>(callback);
+ * // Pass callbackHandle to structs/classes that might be accessed on other
+ * // threads.
+ *
+ * All structs and classes that might be accessed on other threads should store
+ * an nsMainThreadPtrHandle<T> rather than an nsCOMPtr<T>.
+ */
+template <class T>
+class MOZ_IS_SMARTPTR_TO_REFCOUNTED nsMainThreadPtrHolder final {
+ public:
+ // We can only acquire a pointer on the main thread. We want to fail fast for
+ // threading bugs, so by default we assert if our pointer is used or acquired
+ // off-main-thread. But some consumers need to use the same pointer for
+ // multiple classes, some of which are main-thread-only and some of which
+ // aren't. So we allow them to explicitly disable this strict checking.
+ nsMainThreadPtrHolder(const char* aName, T* aPtr, bool aStrict = true,
+ nsIEventTarget* aMainThreadEventTarget = nullptr)
+ : mRawPtr(aPtr),
+ mStrict(aStrict),
+ mMainThreadEventTarget(aMainThreadEventTarget)
+#ifndef RELEASE_OR_BETA
+ ,
+ mName(aName)
+#endif
+ {
+ // We can only AddRef our pointer on the main thread, which means that the
+ // holder must be constructed on the main thread.
+ MOZ_ASSERT(!mStrict || NS_IsMainThread());
+ NS_IF_ADDREF(mRawPtr);
+ }
+ nsMainThreadPtrHolder(const char* aName, already_AddRefed<T> aPtr,
+ bool aStrict = true,
+ nsIEventTarget* aMainThreadEventTarget = nullptr)
+ : mRawPtr(aPtr.take()),
+ mStrict(aStrict),
+ mMainThreadEventTarget(aMainThreadEventTarget)
+#ifndef RELEASE_OR_BETA
+ ,
+ mName(aName)
+#endif
+ {
+ // Since we don't need to AddRef the pointer, this constructor is safe to
+ // call on any thread.
+ }
+
+ // Copy constructor and operator= deleted. Once constructed, the holder is
+ // immutable.
+ T& operator=(nsMainThreadPtrHolder& aOther) = delete;
+ nsMainThreadPtrHolder(const nsMainThreadPtrHolder& aOther) = delete;
+
+ private:
+ // We can be released on any thread.
+ ~nsMainThreadPtrHolder() {
+ if (NS_IsMainThread()) {
+ NS_IF_RELEASE(mRawPtr);
+ } else if (mRawPtr) {
+ if (!mMainThreadEventTarget) {
+ mMainThreadEventTarget = do_GetMainThread();
+ }
+ MOZ_ASSERT(mMainThreadEventTarget);
+ NS_ProxyRelease(
+#ifdef RELEASE_OR_BETA
+ nullptr,
+#else
+ mName,
+#endif
+ mMainThreadEventTarget, dont_AddRef(mRawPtr));
+ }
+ }
+
+ public:
+ T* get() const {
+ // Nobody should be touching the raw pointer off-main-thread.
+ if (mStrict && MOZ_UNLIKELY(!NS_IsMainThread())) {
+ NS_ERROR("Can't dereference nsMainThreadPtrHolder off main thread");
+ MOZ_CRASH();
+ }
+ return mRawPtr;
+ }
+
+ bool operator==(const nsMainThreadPtrHolder<T>& aOther) const {
+ return mRawPtr == aOther.mRawPtr;
+ }
+ bool operator!() const { return !mRawPtr; }
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(nsMainThreadPtrHolder<T>)
+
+ private:
+ // Our wrapped pointer.
+ T* mRawPtr = nullptr;
+
+ // Whether to strictly enforce thread invariants in this class.
+ bool mStrict = true;
+
+ nsCOMPtr<nsIEventTarget> mMainThreadEventTarget;
+
+#ifndef RELEASE_OR_BETA
+ const char* mName = nullptr;
+#endif
+};
+
+template <class T>
+class MOZ_IS_SMARTPTR_TO_REFCOUNTED nsMainThreadPtrHandle {
+ public:
+ nsMainThreadPtrHandle() : mPtr(nullptr) {}
+ MOZ_IMPLICIT nsMainThreadPtrHandle(decltype(nullptr)) : mPtr(nullptr) {}
+ explicit nsMainThreadPtrHandle(nsMainThreadPtrHolder<T>* aHolder)
+ : mPtr(aHolder) {}
+ explicit nsMainThreadPtrHandle(
+ already_AddRefed<nsMainThreadPtrHolder<T>> aHolder)
+ : mPtr(aHolder) {}
+ nsMainThreadPtrHandle(const nsMainThreadPtrHandle& aOther) = default;
+ nsMainThreadPtrHandle(nsMainThreadPtrHandle&& aOther) = default;
+ nsMainThreadPtrHandle& operator=(const nsMainThreadPtrHandle& aOther) =
+ default;
+ nsMainThreadPtrHandle& operator=(nsMainThreadPtrHandle&& aOther) = default;
+ nsMainThreadPtrHandle& operator=(nsMainThreadPtrHolder<T>* aHolder) {
+ mPtr = aHolder;
+ return *this;
+ }
+
+ // These all call through to nsMainThreadPtrHolder, and thus implicitly
+ // assert that we're on the main thread (if strict). Off-main-thread consumers
+ // must treat these handles as opaque.
+ T* get() const {
+ if (mPtr) {
+ return mPtr.get()->get();
+ }
+ return nullptr;
+ }
+
+ operator T*() const { return get(); }
+ T* operator->() const MOZ_NO_ADDREF_RELEASE_ON_RETURN { return get(); }
+
+ // These are safe to call on other threads with appropriate external locking.
+ bool operator==(const nsMainThreadPtrHandle<T>& aOther) const {
+ if (!mPtr || !aOther.mPtr) {
+ return mPtr == aOther.mPtr;
+ }
+ return *mPtr == *aOther.mPtr;
+ }
+ bool operator!=(const nsMainThreadPtrHandle<T>& aOther) const {
+ return !operator==(aOther);
+ }
+ bool operator==(decltype(nullptr)) const { return mPtr == nullptr; }
+ bool operator!=(decltype(nullptr)) const { return mPtr != nullptr; }
+ bool operator!() const { return !mPtr || !*mPtr; }
+
+ private:
+ RefPtr<nsMainThreadPtrHolder<T>> mPtr;
+};
+
+class nsCycleCollectionTraversalCallback;
+template <typename T>
+void CycleCollectionNoteChild(nsCycleCollectionTraversalCallback& aCallback,
+ T* aChild, const char* aName, uint32_t aFlags);
+
+template <typename T>
+inline void ImplCycleCollectionTraverse(
+ nsCycleCollectionTraversalCallback& aCallback,
+ nsMainThreadPtrHandle<T>& aField, const char* aName, uint32_t aFlags = 0) {
+ CycleCollectionNoteChild(aCallback, aField.get(), aName, aFlags);
+}
+
+template <typename T>
+inline void ImplCycleCollectionUnlink(nsMainThreadPtrHandle<T>& aField) {
+ aField = nullptr;
+}
+
+#endif
diff --git a/xpcom/threads/nsThread.cpp b/xpcom/threads/nsThread.cpp
new file mode 100644
index 0000000000..772f6b7738
--- /dev/null
+++ b/xpcom/threads/nsThread.cpp
@@ -0,0 +1,1609 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThread.h"
+
+#include "base/message_loop.h"
+#include "base/platform_thread.h"
+
+// Chromium's logging can sometimes leak through...
+#ifdef LOG
+# undef LOG
+#endif
+
+#include "mozilla/ReentrantMonitor.h"
+#include "nsMemoryPressure.h"
+#include "nsThreadManager.h"
+#include "nsIClassInfoImpl.h"
+#include "nsCOMPtr.h"
+#include "nsQueryObject.h"
+#include "pratom.h"
+#include "mozilla/BackgroundHangMonitor.h"
+#include "mozilla/CycleCollectedJSContext.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Logging.h"
+#include "nsIObserverService.h"
+#include "mozilla/IOInterposer.h"
+#include "mozilla/ipc/MessageChannel.h"
+#include "mozilla/ipc/BackgroundChild.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/ProfilerRunnable.h"
+#include "mozilla/SchedulerGroup.h"
+#include "mozilla/Services.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "mozilla/StaticLocalPtr.h"
+#include "mozilla/StaticPrefs_threads.h"
+#include "mozilla/TaskController.h"
+#include "nsXPCOMPrivate.h"
+#include "mozilla/ChaosMode.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Unused.h"
+#include "mozilla/dom/DocGroup.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "nsThreadSyncDispatch.h"
+#include "nsServiceManagerUtils.h"
+#include "GeckoProfiler.h"
+#include "ThreadEventQueue.h"
+#include "ThreadEventTarget.h"
+#include "ThreadDelay.h"
+
+#include <limits>
+
+#ifdef XP_LINUX
+# ifdef __GLIBC__
+# include <gnu/libc-version.h>
+# endif
+# include <sys/mman.h>
+# include <sys/time.h>
+# include <sys/resource.h>
+# include <sched.h>
+# include <stdio.h>
+#endif
+
+#ifdef XP_WIN
+# include "mozilla/DynamicallyLinkedFunctionPtr.h"
+
+# include <winbase.h>
+
+using GetCurrentThreadStackLimitsFn = void(WINAPI*)(PULONG_PTR LowLimit,
+ PULONG_PTR HighLimit);
+#endif
+
+#define HAVE_UALARM \
+ _BSD_SOURCE || \
+ (_XOPEN_SOURCE >= 500 || _XOPEN_SOURCE && _XOPEN_SOURCE_EXTENDED) && \
+ !(_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700)
+
+#if defined(XP_LINUX) && !defined(ANDROID) && defined(_GNU_SOURCE)
+# define HAVE_SCHED_SETAFFINITY
+#endif
+
+#ifdef XP_MACOSX
+# include <mach/mach.h>
+# include <mach/thread_policy.h>
+# include <sys/qos.h>
+#endif
+
+#ifdef MOZ_CANARY
+# include <unistd.h>
+# include <execinfo.h>
+# include <signal.h>
+# include <fcntl.h>
+# include "nsXULAppAPI.h"
+#endif
+
+using namespace mozilla;
+
+extern void InitThreadLocalVariables();
+
+static LazyLogModule sThreadLog("nsThread");
+#ifdef LOG
+# undef LOG
+#endif
+#define LOG(args) MOZ_LOG(sThreadLog, mozilla::LogLevel::Debug, args)
+
+NS_DECL_CI_INTERFACE_GETTER(nsThread)
+
+Array<char, nsThread::kRunnableNameBufSize> nsThread::sMainThreadRunnableName;
+
+#ifdef EARLY_BETA_OR_EARLIER
+const uint32_t kTelemetryWakeupCountLimit = 100;
+#endif
+
+//-----------------------------------------------------------------------------
+// Because we do not have our own nsIFactory, we have to implement nsIClassInfo
+// somewhat manually.
+
+class nsThreadClassInfo : public nsIClassInfo {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED // no mRefCnt
+ NS_DECL_NSICLASSINFO
+
+ nsThreadClassInfo() = default;
+};
+
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadClassInfo::AddRef() { return 2; }
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadClassInfo::Release() { return 1; }
+NS_IMPL_QUERY_INTERFACE(nsThreadClassInfo, nsIClassInfo)
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetInterfaces(nsTArray<nsIID>& aArray) {
+ return NS_CI_INTERFACE_GETTER_NAME(nsThread)(aArray);
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetScriptableHelper(nsIXPCScriptable** aResult) {
+ *aResult = nullptr;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetContractID(nsACString& aResult) {
+ aResult.SetIsVoid(true);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetClassDescription(nsACString& aResult) {
+ aResult.SetIsVoid(true);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetClassID(nsCID** aResult) {
+ *aResult = nullptr;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetFlags(uint32_t* aResult) {
+ *aResult = THREADSAFE;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetClassIDNoAlloc(nsCID* aResult) {
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+//-----------------------------------------------------------------------------
+
+NS_IMPL_ADDREF(nsThread)
+NS_IMPL_RELEASE(nsThread)
+NS_INTERFACE_MAP_BEGIN(nsThread)
+ NS_INTERFACE_MAP_ENTRY(nsIThread)
+ NS_INTERFACE_MAP_ENTRY(nsIThreadInternal)
+ NS_INTERFACE_MAP_ENTRY(nsIEventTarget)
+ NS_INTERFACE_MAP_ENTRY(nsISerialEventTarget)
+ NS_INTERFACE_MAP_ENTRY(nsISupportsPriority)
+ NS_INTERFACE_MAP_ENTRY(nsIDirectTaskDispatcher)
+ NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIThread)
+ if (aIID.Equals(NS_GET_IID(nsIClassInfo))) {
+ static nsThreadClassInfo sThreadClassInfo;
+ foundInterface = static_cast<nsIClassInfo*>(&sThreadClassInfo);
+ } else
+NS_INTERFACE_MAP_END
+NS_IMPL_CI_INTERFACE_GETTER(nsThread, nsIThread, nsIThreadInternal,
+ nsIEventTarget, nsISerialEventTarget,
+ nsISupportsPriority)
+
+//-----------------------------------------------------------------------------
+
+// This event is responsible for notifying nsThread::Shutdown that it is time
+// to call PR_JoinThread. It implements nsICancelableRunnable so that it can
+// run on a DOM Worker thread (where all events must implement
+// nsICancelableRunnable.)
+class nsThreadShutdownAckEvent : public CancelableRunnable {
+ public:
+ explicit nsThreadShutdownAckEvent(NotNull<nsThreadShutdownContext*> aCtx)
+ : CancelableRunnable("nsThreadShutdownAckEvent"),
+ mShutdownContext(aCtx) {}
+ NS_IMETHOD Run() override {
+ mShutdownContext->mTerminatingThread->ShutdownComplete(mShutdownContext);
+ return NS_OK;
+ }
+ nsresult Cancel() override { return Run(); }
+
+ private:
+ virtual ~nsThreadShutdownAckEvent() = default;
+
+ NotNull<RefPtr<nsThreadShutdownContext>> mShutdownContext;
+};
+
+// This event is responsible for setting mShutdownContext
+class nsThreadShutdownEvent : public Runnable {
+ public:
+ nsThreadShutdownEvent(NotNull<nsThread*> aThr,
+ NotNull<nsThreadShutdownContext*> aCtx)
+ : Runnable("nsThreadShutdownEvent"),
+ mThread(aThr),
+ mShutdownContext(aCtx) {}
+ NS_IMETHOD Run() override {
+ // Creates a cycle between `mThread` and the shutdown context which will be
+ // broken when the thread exits.
+ mThread->mShutdownContext = mShutdownContext;
+ MessageLoop::current()->Quit();
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ // Let's leave a trace that we passed here in the thread's name.
+ nsAutoCString threadName(PR_GetThreadName(PR_GetCurrentThread()));
+ threadName.Append(",SHDRCV"_ns);
+ NS_SetCurrentThreadName(threadName.get());
+#endif
+ return NS_OK;
+ }
+
+ private:
+ NotNull<RefPtr<nsThread>> mThread;
+ NotNull<RefPtr<nsThreadShutdownContext>> mShutdownContext;
+};
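+
+// In outline, the shutdown handshake implemented with these two events is:
+//   1. BeginShutdown() allocates an nsThreadShutdownContext and posts an
+//      nsThreadShutdownEvent to the dying thread.
+//   2. The dying thread stores the context, quits its MessageLoop, and
+//      drains its remaining events in ThreadFunc.
+//   3. ThreadFunc posts an nsThreadShutdownAckEvent back to the joining
+//      thread, which calls ShutdownComplete() to finish the join.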
+
+//-----------------------------------------------------------------------------
+
+static void SetThreadAffinity(unsigned int cpu) {
+#ifdef HAVE_SCHED_SETAFFINITY
+ cpu_set_t cpus;
+ CPU_ZERO(&cpus);
+ CPU_SET(cpu, &cpus);
+ sched_setaffinity(0, sizeof(cpus), &cpus);
+ // Don't assert sched_setaffinity's return value because it intermittently (?)
+ // fails with EINVAL on Linux x64 try runs.
+#elif defined(XP_MACOSX)
+ // OS X does not provide APIs to pin threads to specific processors, but you
+ // can tag threads as belonging to the same "affinity set" and the OS will try
+ // to run them on the same processor. To run threads on different processors,
+ // tag them as belonging to different affinity sets. Tag 0, the default, means
+ // "no affinity" so let's pretend each CPU has its own tag `cpu+1`.
+ thread_affinity_policy_data_t policy;
+ policy.affinity_tag = cpu + 1;
+ kern_return_t kr = thread_policy_set(
+ mach_thread_self(), THREAD_AFFINITY_POLICY, &policy.affinity_tag, 1);
+ // Setting the thread affinity is not supported on ARM.
+ MOZ_ALWAYS_TRUE(kr == KERN_SUCCESS || kr == KERN_NOT_SUPPORTED);
+#elif defined(XP_WIN)
+ MOZ_ALWAYS_TRUE(SetThreadIdealProcessor(GetCurrentThread(), cpu) !=
+ (DWORD)-1);
+#endif
+}
+
+static void SetupCurrentThreadForChaosMode() {
+ if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
+ return;
+ }
+
+#ifdef XP_LINUX
+ // PR_SetThreadPriority doesn't really work since priorities >
+ // PR_PRIORITY_NORMAL can't be set by non-root users. Instead we'll just use
+ // setpriority(2) to set random 'nice values'. In regular Linux this is only
+ // a dynamic adjustment so it still doesn't really do what we want, but tools
+ // like 'rr' can be more aggressive about honoring these values.
+ // Some of these calls may fail due to trying to lower the priority
+ // (e.g. something may have already called setpriority() for this thread).
+ // This makes it hard to have non-main threads with higher priority than the
+ // main thread, but that's hard to fix. Tools like rr can choose to honor the
+ // requested values anyway.
+ // Use just 4 priorities so there's a reasonable chance of any two threads
+ // having equal priority.
+ setpriority(PRIO_PROCESS, 0, ChaosMode::randomUint32LessThan(4));
+#else
+ // We should set the affinity here but NSPR doesn't provide a way to expose
+ // it.
+ uint32_t priority = ChaosMode::randomUint32LessThan(PR_PRIORITY_LAST + 1);
+ PR_SetThreadPriority(PR_GetCurrentThread(), PRThreadPriority(priority));
+#endif
+
+ // Force half the threads to CPU 0 so they compete for CPU
+ if (ChaosMode::randomUint32LessThan(2)) {
+ SetThreadAffinity(0);
+ }
+}
+
+namespace {
+
+struct ThreadInitData {
+ nsThread* thread;
+ nsCString name;
+};
+
+} // namespace
+
+/* static */ mozilla::OffTheBooksMutex& nsThread::ThreadListMutex() {
+ static StaticLocalAutoPtr<OffTheBooksMutex> sMutex(
+ new OffTheBooksMutex("nsThread::ThreadListMutex"));
+ return *sMutex;
+}
+
+/* static */ LinkedList<nsThread>& nsThread::ThreadList() {
+ static StaticLocalAutoPtr<LinkedList<nsThread>> sList(
+ new LinkedList<nsThread>());
+ return *sList;
+}
+
+/* static */
+nsThreadEnumerator nsThread::Enumerate() { return {}; }
+
+void nsThread::AddToThreadList() {
+ OffTheBooksMutexAutoLock mal(ThreadListMutex());
+ MOZ_ASSERT(!isInList());
+
+ ThreadList().insertBack(this);
+}
+
+void nsThread::MaybeRemoveFromThreadList() {
+ OffTheBooksMutexAutoLock mal(ThreadListMutex());
+ if (isInList()) {
+ removeFrom(ThreadList());
+ }
+}
+
+/*static*/
+void nsThread::ThreadFunc(void* aArg) {
+ using mozilla::ipc::BackgroundChild;
+
+ UniquePtr<ThreadInitData> initData(static_cast<ThreadInitData*>(aArg));
+ nsThread* self = initData->thread; // strong reference
+
+ MOZ_ASSERT(self->mEventTarget);
+ MOZ_ASSERT(self->mEvents);
+
+ // Note: see the comment in nsThread::Init, where we set these same values.
+ DebugOnly<PRThread*> prev = self->mThread.exchange(PR_GetCurrentThread());
+ MOZ_ASSERT(!prev || prev == PR_GetCurrentThread());
+ self->mEventTarget->SetCurrentThread(self->mThread);
+ SetupCurrentThreadForChaosMode();
+
+ if (!initData->name.IsEmpty()) {
+ NS_SetCurrentThreadName(initData->name.BeginReading());
+ }
+
+ self->InitCommon();
+
+ // Inform the ThreadManager
+ nsThreadManager::get().RegisterCurrentThread(*self);
+
+ mozilla::IOInterposer::RegisterCurrentThread();
+
+ // This must come after the call to nsThreadManager::RegisterCurrentThread(),
+ // because that call is needed to properly set up this thread as an nsThread,
+ // which profiler_register_thread() requires. See bug 1347007.
+ const bool registerWithProfiler = !initData->name.IsEmpty();
+ if (registerWithProfiler) {
+ PROFILER_REGISTER_THREAD(initData->name.BeginReading());
+ }
+
+ {
+ // Scope for MessageLoop.
+ MessageLoop loop(
+#if defined(XP_WIN) || defined(XP_MACOSX)
+ self->mIsUiThread ? MessageLoop::TYPE_MOZILLA_NONMAINUITHREAD
+ : MessageLoop::TYPE_MOZILLA_NONMAINTHREAD,
+#else
+ MessageLoop::TYPE_MOZILLA_NONMAINTHREAD,
+#endif
+ self);
+
+ // Now, process incoming events...
+ loop.Run();
+
+ self->mEvents->RunShutdownTasks();
+
+ BackgroundChild::CloseForCurrentThread();
+
+ // NB: The main thread does not shut down here! It shuts down via
+ // nsThreadManager::Shutdown.
+
+ // Do NS_ProcessPendingEvents but with special handling to set
+ // mEventsAreDoomed atomically with the removal of the last event. The key
+ // invariant here is that we will never permit PutEvent to succeed if the
+ // event would be left in the queue after our final call to
+ // NS_ProcessPendingEvents. We also have to keep processing events as long
+    // as we have outstanding shutdown contexts (mOutstandingShutdownContexts).
+ while (true) {
+ // Check and see if we're waiting on any threads.
+ self->WaitForAllAsynchronousShutdowns();
+
+ if (self->mEvents->ShutdownIfNoPendingEvents()) {
+ break;
+ }
+ NS_ProcessPendingEvents(self);
+ }
+ }
+
+ mozilla::IOInterposer::UnregisterCurrentThread();
+
+ // Inform the threadmanager that this thread is going away
+ nsThreadManager::get().UnregisterCurrentThread(*self);
+
+ // The thread should only unregister itself if it was registered above.
+ if (registerWithProfiler) {
+ PROFILER_UNREGISTER_THREAD();
+ }
+
+ NotNull<RefPtr<nsThreadShutdownContext>> context =
+ WrapNotNull(self->mShutdownContext);
+ self->mShutdownContext = nullptr;
+ MOZ_ASSERT(context->mTerminatingThread == self);
+
+ // Take the joining thread from our shutdown context. This may have been
+ // cleared by the joining thread if it decided to cancel waiting on us, in
+ // which case we won't notify our caller, and leak.
+ RefPtr<nsThread> joiningThread;
+ {
+ MutexAutoLock lock(context->mJoiningThreadMutex);
+ joiningThread = context->mJoiningThread.forget();
+ MOZ_RELEASE_ASSERT(joiningThread || context->mThreadLeaked);
+ }
+ if (joiningThread) {
+ // Dispatch shutdown ACK
+ nsCOMPtr<nsIRunnable> event = new nsThreadShutdownAckEvent(context);
+ nsresult dispatch_ack_rv =
+ joiningThread->Dispatch(event, NS_DISPATCH_NORMAL);
+
+    // We do not expect this to ever happen, but if we cannot dispatch
+    // the ack event, someone is probably blocked waiting on us and will
+    // crash with a hang later anyway. The best we can do is to tell
+ // the world what happened right here.
+ MOZ_RELEASE_ASSERT(NS_SUCCEEDED(dispatch_ack_rv));
+
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ // Let's leave a trace that we passed here in the thread's name.
+ nsAutoCString threadName(PR_GetThreadName(PR_GetCurrentThread()));
+ threadName.Append(",SHDACK"_ns);
+ NS_SetCurrentThreadName(threadName.get());
+#endif
+ } else {
+ NS_WARNING(
+ "nsThread exiting after StopWaitingAndLeakThread was called, thread "
+ "resources will be leaked!");
+ }
+
+ // Release any observer of the thread here.
+ self->SetObserver(nullptr);
+
+ // The PRThread will be deleted in PR_JoinThread(), so clear references.
+ self->mThread = nullptr;
+ self->mEventTarget->ClearCurrentThread();
+ NS_RELEASE(self);
+}
+
+void nsThread::InitCommon() {
+ mThreadId = uint32_t(PlatformThread::CurrentId());
+
+ {
+#if defined(XP_LINUX)
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_getattr_np(pthread_self(), &attr);
+
+ size_t stackSize;
+ pthread_attr_getstack(&attr, &mStackBase, &stackSize);
+
+ // Glibc prior to 2.27 reports the stack size and base including the guard
+ // region, so we need to compensate for it to get accurate accounting.
+ // Also, this behavior difference isn't guarded by a versioned symbol, so we
+ // actually need to check the runtime glibc version, not the version we were
+ // compiled against.
+ static bool sAdjustForGuardSize = ({
+# ifdef __GLIBC__
+ unsigned major, minor;
+ sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 ||
+ major < 2 || (major == 2 && minor < 27);
+# else
+ false;
+# endif
+ });
+ if (sAdjustForGuardSize) {
+ size_t guardSize;
+ pthread_attr_getguardsize(&attr, &guardSize);
+
+ // Note: This assumes that the stack grows down, as is the case on all of
+ // our tier 1 platforms. On platforms where the stack grows up, the
+ // mStackBase adjustment is unnecessary, but doesn't cause any harm other
+ // than under-counting stack memory usage by one page.
+ mStackBase = reinterpret_cast<char*>(mStackBase) + guardSize;
+ stackSize -= guardSize;
+ }
+
+ mStackSize = stackSize;
+
+ // This is a bit of a hack.
+ //
+ // We really do want the NOHUGEPAGE flag on our thread stacks, since we
+ // don't expect any of them to need anywhere near 2MB of space. But setting
+ // it here is too late to have an effect, since the first stack page has
+ // already been faulted in existence, and NSPR doesn't give us a way to set
+ // it beforehand.
+ //
+ // What this does get us, however, is a different set of VM flags on our
+ // thread stacks compared to normal heap memory. Which makes the Linux
+ // kernel report them as separate regions, even when they are adjacent to
+ // heap memory. This allows us to accurately track the actual memory
+ // consumption of our allocated stacks.
+ madvise(mStackBase, stackSize, MADV_NOHUGEPAGE);
+
+ pthread_attr_destroy(&attr);
+#elif defined(XP_WIN)
+ static const StaticDynamicallyLinkedFunctionPtr<
+ GetCurrentThreadStackLimitsFn>
+ sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits");
+
+ if (sGetStackLimits) {
+ ULONG_PTR stackBottom, stackTop;
+ sGetStackLimits(&stackBottom, &stackTop);
+ mStackBase = reinterpret_cast<void*>(stackBottom);
+ mStackSize = stackTop - stackBottom;
+ }
+#endif
+ }
+
+ InitThreadLocalVariables();
+ AddToThreadList();
+}
+
+//-----------------------------------------------------------------------------
+
+#ifdef MOZ_CANARY
+int sCanaryOutputFD = -1;
+#endif
+
+nsThread::nsThread(NotNull<SynchronizedEventQueue*> aQueue,
+ MainThreadFlag aMainThread,
+ nsIThreadManager::ThreadCreationOptions aOptions)
+ : mEvents(aQueue.get()),
+ mEventTarget(new ThreadEventTarget(
+ mEvents.get(), aMainThread == MAIN_THREAD, aOptions.blockDispatch)),
+ mOutstandingShutdownContexts(0),
+ mShutdownContext(nullptr),
+ mScriptObserver(nullptr),
+ mThreadName("<uninitialized>"),
+ mStackSize(aOptions.stackSize),
+ mNestedEventLoopDepth(0),
+ mShutdownRequired(false),
+ mPriority(PRIORITY_NORMAL),
+ mIsMainThread(aMainThread == MAIN_THREAD),
+ mUseHangMonitor(aMainThread == MAIN_THREAD),
+ mIsUiThread(aOptions.isUiThread),
+ mIsAPoolThreadFree(nullptr),
+ mCanInvokeJS(false),
+#ifdef EARLY_BETA_OR_EARLIER
+ mLastWakeupCheckTime(TimeStamp::Now()),
+#endif
+ mPerformanceCounterState(mNestedEventLoopDepth, mIsMainThread) {
+#if !(defined(XP_WIN) || defined(XP_MACOSX))
+ MOZ_ASSERT(!mIsUiThread,
+ "Non-main UI threads are only supported on Windows and macOS");
+#endif
+ if (mIsMainThread) {
+ MOZ_ASSERT(!mIsUiThread,
+ "Setting isUIThread is not supported for main threads");
+ mozilla::TaskController::Get()->SetPerformanceCounterState(
+ &mPerformanceCounterState);
+ }
+}
+
+nsThread::nsThread()
+ : mEvents(nullptr),
+ mEventTarget(nullptr),
+ mOutstandingShutdownContexts(0),
+ mShutdownContext(nullptr),
+ mScriptObserver(nullptr),
+ mThreadName("<uninitialized>"),
+ mStackSize(0),
+ mNestedEventLoopDepth(0),
+ mShutdownRequired(false),
+ mPriority(PRIORITY_NORMAL),
+ mIsMainThread(false),
+ mUseHangMonitor(false),
+ mIsUiThread(false),
+ mCanInvokeJS(false),
+#ifdef EARLY_BETA_OR_EARLIER
+ mLastWakeupCheckTime(TimeStamp::Now()),
+#endif
+ mPerformanceCounterState(mNestedEventLoopDepth, mIsMainThread) {
+ MOZ_ASSERT(!NS_IsMainThread());
+}
+
+nsThread::~nsThread() {
+ NS_ASSERTION(mOutstandingShutdownContexts == 0,
+ "shouldn't be waiting on other threads to shutdown");
+
+ MaybeRemoveFromThreadList();
+}
+
+nsresult nsThread::Init(const nsACString& aName) {
+ MOZ_ASSERT(mEvents);
+ MOZ_ASSERT(mEventTarget);
+ MOZ_ASSERT(!mThread);
+
+ NS_ADDREF_THIS();
+
+ SetThreadNameInternal(aName);
+
+ mShutdownRequired = true;
+
+ UniquePtr<ThreadInitData> initData(
+ new ThreadInitData{this, nsCString(aName)});
+
+ PRThread* thread = nullptr;
+ // ThreadFunc is responsible for setting mThread
+ if (!(thread = PR_CreateThread(PR_USER_THREAD, ThreadFunc, initData.get(),
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_JOINABLE_THREAD, mStackSize))) {
+ NS_RELEASE_THIS();
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ // The created thread now owns initData, so release our ownership of it.
+ Unused << initData.release();
+
+ // Note: we set these both here and inside ThreadFunc, to what should be
+ // the same value. This is because calls within ThreadFunc need these values
+ // to be set, and our callers need these values to be set.
+ DebugOnly<PRThread*> prev = mThread.exchange(thread);
+ MOZ_ASSERT(!prev || prev == thread);
+
+ mEventTarget->SetCurrentThread(thread);
+ return NS_OK;
+}
+
+nsresult nsThread::InitCurrentThread() {
+ mThread = PR_GetCurrentThread();
+ SetupCurrentThreadForChaosMode();
+ InitCommon();
+
+ nsThreadManager::get().RegisterCurrentThread(*this);
+ return NS_OK;
+}
+
+void nsThread::GetThreadName(nsACString& aNameBuffer) {
+ auto lock = mThreadName.Lock();
+ aNameBuffer = lock.ref();
+}
+
+void nsThread::SetThreadNameInternal(const nsACString& aName) {
+ auto lock = mThreadName.Lock();
+ lock->Assign(aName);
+}
+
+//-----------------------------------------------------------------------------
+// nsIEventTarget
+
+NS_IMETHODIMP
+nsThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
+ MOZ_ASSERT(mEventTarget);
+ NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);
+
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return mEventTarget->Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+nsThread::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) {
+ MOZ_ASSERT(mEventTarget);
+ NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);
+
+ LOG(("THRD(%p) Dispatch [%p %x]\n", this, /* XXX aEvent */ nullptr, aFlags));
+
+ return mEventTarget->Dispatch(std::move(aEvent), aFlags);
+}
+
+NS_IMETHODIMP
+nsThread::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aDelayMs) {
+ MOZ_ASSERT(mEventTarget);
+ NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);
+
+ return mEventTarget->DelayedDispatch(std::move(aEvent), aDelayMs);
+}
+
+NS_IMETHODIMP
+nsThread::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ MOZ_ASSERT(mEventTarget);
+ NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);
+
+ return mEventTarget->RegisterShutdownTask(aTask);
+}
+
+NS_IMETHODIMP
+nsThread::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ MOZ_ASSERT(mEventTarget);
+ NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);
+
+ return mEventTarget->UnregisterShutdownTask(aTask);
+}
+
+NS_IMETHODIMP
+nsThread::GetRunningEventDelay(TimeDuration* aDelay, TimeStamp* aStart) {
+ if (mIsAPoolThreadFree && *mIsAPoolThreadFree) {
+    // If there are unstarted threads in the pool, a new event dispatched to
+    // the pool would not be delayed at all (beyond thread start time).
+ *aDelay = TimeDuration();
+ *aStart = TimeStamp();
+ } else {
+ *aDelay = mLastEventDelay;
+ *aStart = mLastEventStart;
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetRunningEventDelay(TimeDuration aDelay, TimeStamp aStart) {
+ mLastEventDelay = aDelay;
+ mLastEventStart = aStart;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::IsOnCurrentThread(bool* aResult) {
+ if (mEventTarget) {
+ return mEventTarget->IsOnCurrentThread(aResult);
+ }
+ *aResult = PR_GetCurrentThread() == mThread;
+ return NS_OK;
+}
+
+NS_IMETHODIMP_(bool)
+nsThread::IsOnCurrentThreadInfallible() {
+ // This method is only going to be called if `mThread` is null, which
+ // only happens when the thread has exited the event loop. Therefore, when
+ // we are called, we can never be on this thread.
+ return false;
+}
+
+//-----------------------------------------------------------------------------
+// nsIThread
+
+NS_IMETHODIMP
+nsThread::GetPRThread(PRThread** aResult) {
+ PRThread* thread = mThread; // atomic load
+ *aResult = thread;
+ return thread ? NS_OK : NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+nsThread::GetCanInvokeJS(bool* aResult) {
+ *aResult = mCanInvokeJS;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetCanInvokeJS(bool aCanInvokeJS) {
+ mCanInvokeJS = aCanInvokeJS;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::GetLastLongTaskEnd(TimeStamp* _retval) {
+ *_retval = mPerformanceCounterState.LastLongTaskEnd();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::GetLastLongNonIdleTaskEnd(TimeStamp* _retval) {
+ *_retval = mPerformanceCounterState.LastLongNonIdleTaskEnd();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetNameForWakeupTelemetry(const nsACString& aName) {
+#ifdef EARLY_BETA_OR_EARLIER
+ mNameForWakeupTelemetry = aName;
+#endif
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::AsyncShutdown() {
+ LOG(("THRD(%p) async shutdown\n", this));
+
+ nsCOMPtr<nsIThreadShutdown> shutdown;
+ BeginShutdown(getter_AddRefs(shutdown));
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::BeginShutdown(nsIThreadShutdown** aShutdown) {
+ LOG(("THRD(%p) begin shutdown\n", this));
+
+ MOZ_ASSERT(mEvents);
+ MOZ_ASSERT(mEventTarget);
+ MOZ_ASSERT(mThread != PR_GetCurrentThread());
+ if (NS_WARN_IF(mThread == PR_GetCurrentThread())) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // Prevent multiple calls to this method.
+ if (!mShutdownRequired.compareExchange(true, false)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ MOZ_ASSERT(mThread);
+
+ MaybeRemoveFromThreadList();
+
+ RefPtr<nsThread> currentThread = nsThreadManager::get().GetCurrentThread();
+
+ MOZ_DIAGNOSTIC_ASSERT(currentThread->EventQueue(),
+ "Shutdown() may only be called from an XPCOM thread");
+
+ // Allocate a shutdown context, and record that we're waiting for it.
+ RefPtr<nsThreadShutdownContext> context =
+ new nsThreadShutdownContext(WrapNotNull(this), currentThread);
+
+ ++currentThread->mOutstandingShutdownContexts;
+ nsCOMPtr<nsIRunnable> clearOutstanding = NS_NewRunnableFunction(
+ "nsThread::ClearOutstandingShutdownContext",
+ [currentThread] { --currentThread->mOutstandingShutdownContexts; });
+ context->OnCompletion(clearOutstanding);
+
+ // Set mShutdownContext and wake up the thread in case it is waiting for
+ // events to process.
+ nsCOMPtr<nsIRunnable> event =
+ new nsThreadShutdownEvent(WrapNotNull(this), WrapNotNull(context));
+ if (!mEvents->PutEvent(event.forget(), EventQueuePriority::Normal)) {
+ // We do not expect this to happen. Let's collect some diagnostics.
+ nsAutoCString threadName;
+ GetThreadName(threadName);
+ MOZ_CRASH_UNSAFE_PRINTF("Attempt to shutdown an already dead thread: %s",
+ threadName.get());
+ }
+
+ // We could still end up with other events being added after the shutdown
+ // task, but that's okay because we process pending events in ThreadFunc
+ // after setting mShutdownContext just before exiting.
+ context.forget(aShutdown);
+ return NS_OK;
+}
+
+void nsThread::ShutdownComplete(NotNull<nsThreadShutdownContext*> aContext) {
+ MOZ_ASSERT(mEvents);
+ MOZ_ASSERT(mEventTarget);
+ MOZ_ASSERT(aContext->mTerminatingThread == this);
+
+#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
+ {
+ MutexAutoLock lock(aContext->mJoiningThreadMutex);
+
+    // StopWaitingAndLeakThread is explicitly meant not to cause a
+ // nsThreadShutdownAckEvent on the joining thread, which is the only
+ // caller of ShutdownComplete.
+ MOZ_DIAGNOSTIC_ASSERT(!aContext->mThreadLeaked);
+ }
+#endif
+
+ MaybeRemoveFromThreadList();
+
+ // Now, it should be safe to join without fear of dead-locking.
+ PR_JoinThread(aContext->mTerminatingPRThread);
+ MOZ_ASSERT(!mThread);
+
+#ifdef DEBUG
+ nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver();
+ MOZ_ASSERT(!obs, "Should have been cleared at shutdown!");
+#endif
+
+ aContext->MarkCompleted();
+}
+
+void nsThread::WaitForAllAsynchronousShutdowns() {
+ // This is the motivating example for why SpinEventLoopUntil
+ // has the template parameter we are providing here.
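+  // ProcessFailureBehavior::IgnoreAndContinue keeps the loop spinning even
+  // if an iteration fails to process an event, since we must not return
+  // while any shutdown context is still outstanding.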
+ SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
+ "nsThread::WaitForAllAsynchronousShutdowns"_ns,
+ [&]() { return mOutstandingShutdownContexts == 0; }, this);
+}
+
+NS_IMETHODIMP
+nsThread::Shutdown() {
+ LOG(("THRD(%p) sync shutdown\n", this));
+
+ nsCOMPtr<nsIThreadShutdown> context;
+ nsresult rv = BeginShutdown(getter_AddRefs(context));
+ if (NS_FAILED(rv)) {
+ return NS_OK; // The thread has already shut down.
+ }
+
+ // If we are going to hang here we want to see the thread's name
+ nsAutoCString threadName;
+ GetThreadName(threadName);
+
+ // Process events on the current thread until we receive a shutdown ACK.
+ // Allows waiting; ensure no locks are held that would deadlock us!
+ SpinEventLoopUntil("nsThread::Shutdown: "_ns + threadName,
+ [&]() { return context->GetCompleted(); });
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::HasPendingEvents(bool* aResult) {
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ if (mIsMainThread) {
+ *aResult = TaskController::Get()->HasMainThreadPendingTasks();
+ } else {
+ *aResult = mEvents->HasPendingEvent();
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::HasPendingHighPriorityEvents(bool* aResult) {
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+  // This function no longer appears to be called.
+ *aResult = false;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::DispatchToQueue(already_AddRefed<nsIRunnable> aEvent,
+ EventQueuePriority aQueue) {
+ nsCOMPtr<nsIRunnable> event = aEvent;
+
+ if (NS_WARN_IF(!event)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ if (!mEvents->PutEvent(event.forget(), aQueue)) {
+ NS_WARNING(
+ "An idle event was posted to a thread that will never run it "
+ "(rejected)");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsThread::SetThreadQoS(nsIThread::QoSPriority aPriority) {
+ if (!StaticPrefs::threads_use_low_power_enabled()) {
+ return NS_OK;
+ }
+ // The approach here is to have a thread set itself for its QoS level,
+ // so we assert if we aren't on the current thread.
+ MOZ_ASSERT(IsOnCurrentThread(), "Can only change the current thread's QoS");
+
+#if defined(XP_MACOSX)
+  // Only arm64 Macs may possess heterogeneous cores. On these, we can tell
+  // a thread to set its own QoS status. On Intel Macs things should behave
+  // normally, and the OS will ignore the QoS state of the thread.
+ if (aPriority == nsIThread::QOS_PRIORITY_LOW) {
+ pthread_set_qos_class_self_np(QOS_CLASS_BACKGROUND, 0);
+ } else if (NS_IsMainThread()) {
+    // macOS documentation specifies that a main thread should be initialized
+    // at the USER_INTERACTIVE priority, so when we restore thread priorities
+    // the main thread should set itself back to this.
+ pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
+ } else {
+ pthread_set_qos_class_self_np(QOS_CLASS_DEFAULT, 0);
+ }
+#endif
+ // Do nothing if an OS-specific implementation is unavailable.
+ return NS_OK;
+}
+
+#ifdef MOZ_CANARY
+void canary_alarm_handler(int signum);
+
+class Canary {
+ // XXX ToDo: support nested loops
+ public:
+ Canary() {
+ if (sCanaryOutputFD > 0 && EventLatencyIsImportant()) {
+ signal(SIGALRM, canary_alarm_handler);
+ ualarm(15000, 0);
+ }
+ }
+
+ ~Canary() {
+ if (sCanaryOutputFD != 0 && EventLatencyIsImportant()) {
+ ualarm(0, 0);
+ }
+ }
+
+ static bool EventLatencyIsImportant() {
+ return NS_IsMainThread() && XRE_IsParentProcess();
+ }
+};
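+
+// A Canary is stack-allocated around each event in ProcessNextEvent below. On
+// the parent process's main thread it arms a 15 ms ualarm at construction;
+// if the event is still running when the alarm fires, canary_alarm_handler
+// writes a backtrace to sCanaryOutputFD.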
+
+void canary_alarm_handler(int signum) {
+ void* array[30];
+ const char msg[29] = "event took too long to run:\n";
+ // use write to be safe in the signal handler
+ write(sCanaryOutputFD, msg, sizeof(msg));
+ backtrace_symbols_fd(array, backtrace(array, 30), sCanaryOutputFD);
+}
+
+#endif
+
+#define NOTIFY_EVENT_OBSERVERS(observers_, func_, params_) \
+ do { \
+ if (!observers_.IsEmpty()) { \
+ for (nsCOMPtr<nsIThreadObserver> obs_ : observers_.ForwardRange()) { \
+ obs_->func_ params_; \
+ } \
+ } \
+ } while (0)
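+
+// For example, NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(),
+// OnProcessNextEvent, (this, reallyWait)) below expands to a range-based loop
+// that calls obs_->OnProcessNextEvent(this, reallyWait) on each registered
+// observer.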
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+// static
+bool nsThread::GetLabeledRunnableName(nsIRunnable* aEvent, nsACString& aName,
+ EventQueuePriority aPriority) {
+ bool labeled = false;
+ if (RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent)) {
+ labeled = true;
+ MOZ_ALWAYS_TRUE(NS_SUCCEEDED(groupRunnable->GetName(aName)));
+ } else if (nsCOMPtr<nsINamed> named = do_QueryInterface(aEvent)) {
+ MOZ_ALWAYS_TRUE(NS_SUCCEEDED(named->GetName(aName)));
+ } else {
+ aName.AssignLiteral("non-nsINamed runnable");
+ }
+ if (aName.IsEmpty()) {
+ aName.AssignLiteral("anonymous runnable");
+ }
+
+ if (!labeled && aPriority > EventQueuePriority::InputHigh) {
+ aName.AppendLiteral("(unlabeled)");
+ }
+
+ return labeled;
+}
+#endif
+
+mozilla::PerformanceCounter* nsThread::GetPerformanceCounter(
+ nsIRunnable* aEvent) const {
+ return GetPerformanceCounterBase(aEvent);
+}
+
+// static
+mozilla::PerformanceCounter* nsThread::GetPerformanceCounterBase(
+ nsIRunnable* aEvent) {
+ RefPtr<SchedulerGroup::Runnable> docRunnable = do_QueryObject(aEvent);
+ if (docRunnable) {
+ return docRunnable->GetPerformanceCounter();
+ }
+ return nullptr;
+}
+
+size_t nsThread::ShallowSizeOfIncludingThis(
+ mozilla::MallocSizeOf aMallocSizeOf) const {
+ size_t n = 0;
+ if (mShutdownContext) {
+ n += aMallocSizeOf(mShutdownContext);
+ }
+ return aMallocSizeOf(this) + aMallocSizeOf(mThread) + n;
+}
+
+size_t nsThread::SizeOfEventQueues(mozilla::MallocSizeOf aMallocSizeOf) const {
+ size_t n = 0;
+ if (mEventTarget) {
+ // The size of mEvents is reported by mEventTarget.
+ n += mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ return n;
+}
+
+size_t nsThread::SizeOfIncludingThis(
+ mozilla::MallocSizeOf aMallocSizeOf) const {
+ return ShallowSizeOfIncludingThis(aMallocSizeOf) +
+ SizeOfEventQueues(aMallocSizeOf);
+}
+
+NS_IMETHODIMP
+nsThread::ProcessNextEvent(bool aMayWait, bool* aResult) {
+ MOZ_ASSERT(mEvents);
+ NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
+
+ LOG(("THRD(%p) ProcessNextEvent [%u %u]\n", this, aMayWait,
+ mNestedEventLoopDepth));
+
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ // The toplevel event loop normally blocks waiting for the next event, but
+ // if we're trying to shut this thread down, we must exit the event loop
+  // when the event queue is empty. This only applies to the toplevel event
+ // loop! Nested event loops (e.g. during sync dispatch) are waiting for
+ // some state change and must be able to block even if something has
+ // requested shutdown of the thread. Otherwise we'll just busywait as we
+ // endlessly look for an event, fail to find one, and repeat the nested
+ // event loop since its state change hasn't happened yet.
+ bool reallyWait = aMayWait && (mNestedEventLoopDepth > 0 || !ShuttingDown());
+
+ Maybe<dom::AutoNoJSAPI> noJSAPI;
+
+ if (mUseHangMonitor && reallyWait) {
+ BackgroundHangMonitor().NotifyWait();
+ }
+
+ if (mIsMainThread) {
+ DoMainThreadSpecificProcessing();
+ }
+
+ ++mNestedEventLoopDepth;
+
+ // We only want to create an AutoNoJSAPI on threads that actually do DOM
+ // stuff (including workers). Those are exactly the threads that have an
+ // mScriptObserver.
+ bool callScriptObserver = !!mScriptObserver;
+ if (callScriptObserver) {
+ noJSAPI.emplace();
+ mScriptObserver->BeforeProcessTask(reallyWait);
+ }
+
+ DrainDirectTasks();
+
+#ifdef EARLY_BETA_OR_EARLIER
+  // Need to capture the mayWaitForWakeup state before OnProcessNextEvent,
+  // since on the main thread OnProcessNextEvent ends up waiting for new
+  // events.
+ bool mayWaitForWakeup = reallyWait && !mEvents->HasPendingEvent();
+#endif
+
+ nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserverOnThread();
+ if (obs) {
+ obs->OnProcessNextEvent(this, reallyWait);
+ }
+
+ NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), OnProcessNextEvent,
+ (this, reallyWait));
+
+ DrainDirectTasks();
+
+#ifdef MOZ_CANARY
+ Canary canary;
+#endif
+ nsresult rv = NS_OK;
+
+ {
+ // Scope for |event| to make sure that its destructor fires while
+ // mNestedEventLoopDepth has been incremented, since that destructor can
+ // also do work.
+ nsCOMPtr<nsIRunnable> event;
+ bool usingTaskController = mIsMainThread;
+ if (usingTaskController) {
+ event = TaskController::Get()->GetRunnableForMTTask(reallyWait);
+ } else {
+ event = mEvents->GetEvent(reallyWait, &mLastEventDelay);
+ }
+
+ *aResult = (event.get() != nullptr);
+
+ if (event) {
+#ifdef EARLY_BETA_OR_EARLIER
+ if (mayWaitForWakeup && mThread) {
+ ++mWakeupCount;
+ if (mWakeupCount == kTelemetryWakeupCountLimit) {
+ TimeStamp now = TimeStamp::Now();
+ double ms = (now - mLastWakeupCheckTime).ToMilliseconds();
+ if (ms < 0) {
+ ms = 0;
+ }
+ const char* name = !mNameForWakeupTelemetry.IsEmpty()
+ ? mNameForWakeupTelemetry.get()
+ : PR_GetThreadName(mThread);
+ if (!name) {
+ name = mIsMainThread ? "MainThread" : "(nameless thread)";
+ }
+ nsDependentCString key(name);
+ Telemetry::Accumulate(Telemetry::THREAD_WAKEUP, key,
+ static_cast<uint32_t>(ms));
+ mLastWakeupCheckTime = now;
+ mWakeupCount = 0;
+ }
+ }
+#endif
+
+ LOG(("THRD(%p) running [%p]\n", this, event.get()));
+
+ Maybe<LogRunnable::Run> log;
+
+ if (!usingTaskController) {
+ log.emplace(event);
+ }
+
+ // Delay event processing to encourage whoever dispatched this event
+ // to run.
+ DelayForChaosMode(ChaosFeature::TaskRunning, 1000);
+
+ mozilla::TimeStamp now = mozilla::TimeStamp::Now();
+
+ if (mUseHangMonitor) {
+ BackgroundHangMonitor().NotifyActivity();
+ }
+
+ Maybe<PerformanceCounterState::Snapshot> snapshot;
+ if (!usingTaskController) {
+ snapshot.emplace(mPerformanceCounterState.RunnableWillRun(
+ GetPerformanceCounter(event), now, false));
+ }
+
+ mLastEventStart = now;
+
+ if (!usingTaskController) {
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(event);
+ event->Run();
+ } else {
+ // Avoid generating "Runnable" profiler markers for the
+ // "TaskController::ExecutePendingMTTasks" runnables created
+ // by TaskController, which already adds "Runnable" markers
+ // when executing tasks.
+ event->Run();
+ }
+
+ if (usingTaskController) {
+ *aResult = TaskController::Get()->MTTaskRunnableProcessedTask();
+ } else {
+ mPerformanceCounterState.RunnableDidRun(EmptyCString(),
+ std::move(snapshot.ref()));
+ }
+
+ // To cover the event's destructor code inside the LogRunnable span.
+ event = nullptr;
+ } else {
+ mLastEventDelay = TimeDuration();
+ mLastEventStart = TimeStamp();
+ if (aMayWait) {
+ MOZ_ASSERT(ShuttingDown(),
+ "This should only happen when shutting down");
+ rv = NS_ERROR_UNEXPECTED;
+ }
+ }
+ }
+
+ DrainDirectTasks();
+
+ NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), AfterProcessNextEvent,
+ (this, *aResult));
+
+ if (obs) {
+ obs->AfterProcessNextEvent(this, *aResult);
+ }
+
+ // In case some EventObserver dispatched some direct tasks; process them
+ // now.
+ DrainDirectTasks();
+
+ if (callScriptObserver) {
+ if (mScriptObserver) {
+ mScriptObserver->AfterProcessTask(mNestedEventLoopDepth);
+ }
+ noJSAPI.reset();
+ }
+
+ --mNestedEventLoopDepth;
+
+ return rv;
+}
+
+//-----------------------------------------------------------------------------
+// nsISupportsPriority
+
+NS_IMETHODIMP
+nsThread::GetPriority(int32_t* aPriority) {
+ *aPriority = mPriority;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetPriority(int32_t aPriority) {
+ if (NS_WARN_IF(!mThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ // NSPR defines the following four thread priorities:
+ // PR_PRIORITY_LOW
+ // PR_PRIORITY_NORMAL
+ // PR_PRIORITY_HIGH
+ // PR_PRIORITY_URGENT
+ // We map the priority values defined on nsISupportsPriority to these
+ // values.
+
+ mPriority = aPriority;
+
+ PRThreadPriority pri;
+ if (mPriority <= PRIORITY_HIGHEST) {
+ pri = PR_PRIORITY_URGENT;
+ } else if (mPriority < PRIORITY_NORMAL) {
+ pri = PR_PRIORITY_HIGH;
+ } else if (mPriority > PRIORITY_NORMAL) {
+ pri = PR_PRIORITY_LOW;
+ } else {
+ pri = PR_PRIORITY_NORMAL;
+ }
+ // If chaos mode is active, retain the randomly chosen priority
+ if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
+ PR_SetThreadPriority(mThread, pri);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::AdjustPriority(int32_t aDelta) {
+ return SetPriority(mPriority + aDelta);
+}
+
+//-----------------------------------------------------------------------------
+// nsIThreadInternal
+
+NS_IMETHODIMP
+nsThread::GetObserver(nsIThreadObserver** aObs) {
+ MOZ_ASSERT(mEvents);
+ NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
+
+ nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver();
+ obs.forget(aObs);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetObserver(nsIThreadObserver* aObs) {
+ MOZ_ASSERT(mEvents);
+ NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
+
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ mEvents->SetObserver(aObs);
+ return NS_OK;
+}
+
+uint32_t nsThread::RecursionDepth() const {
+ MOZ_ASSERT(PR_GetCurrentThread() == mThread);
+ return mNestedEventLoopDepth;
+}
+
+NS_IMETHODIMP
+nsThread::AddObserver(nsIThreadObserver* aObserver) {
+ MOZ_ASSERT(mEvents);
+ NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
+
+ if (NS_WARN_IF(!aObserver)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ EventQueue()->AddObserver(aObserver);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::RemoveObserver(nsIThreadObserver* aObserver) {
+ MOZ_ASSERT(mEvents);
+ NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);
+
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ EventQueue()->RemoveObserver(aObserver);
+
+ return NS_OK;
+}
+
+void nsThread::SetScriptObserver(
+ mozilla::CycleCollectedJSContext* aScriptObserver) {
+ if (!aScriptObserver) {
+ mScriptObserver = nullptr;
+ return;
+ }
+
+ MOZ_ASSERT(!mScriptObserver);
+ mScriptObserver = aScriptObserver;
+}
+
+void NS_DispatchMemoryPressure();
+
+void nsThread::DoMainThreadSpecificProcessing() const {
+ MOZ_ASSERT(mIsMainThread);
+
+ ipc::CancelCPOWs();
+
+ // Fire a memory pressure notification, if one is pending.
+ if (!ShuttingDown()) {
+ NS_DispatchMemoryPressure();
+ }
+}
+
+//-----------------------------------------------------------------------------
+// nsIDirectTaskDispatcher
+
+NS_IMETHODIMP
+nsThread::DispatchDirectTask(already_AddRefed<nsIRunnable> aEvent) {
+ if (!IsOnCurrentThread()) {
+ return NS_ERROR_FAILURE;
+ }
+ mDirectTasks.AddTask(std::move(aEvent));
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsThread::DrainDirectTasks() {
+ if (!IsOnCurrentThread()) {
+ return NS_ERROR_FAILURE;
+ }
+ mDirectTasks.DrainTasks();
+ return NS_OK;
+}
+
+NS_IMETHODIMP nsThread::HaveDirectTasks(bool* aValue) {
+ if (!IsOnCurrentThread()) {
+ return NS_ERROR_FAILURE;
+ }
+
+ *aValue = mDirectTasks.HaveTasks();
+ return NS_OK;
+}
+
+NS_IMPL_ISUPPORTS(nsThreadShutdownContext, nsIThreadShutdown)
+
+NS_IMETHODIMP
+nsThreadShutdownContext::OnCompletion(nsIRunnable* aEvent) {
+ if (mCompleted) {
+ aEvent->Run();
+ } else {
+ mCompletionCallbacks.AppendElement(aEvent);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadShutdownContext::GetCompleted(bool* aCompleted) {
+ *aCompleted = mCompleted;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadShutdownContext::StopWaitingAndLeakThread() {
+ // Take the joining thread from `mJoiningThread` so that the terminating
+ // thread won't try to dispatch nsThreadShutdownAckEvent to us anymore.
+ RefPtr<nsThread> joiningThread;
+ {
+ MutexAutoLock lock(mJoiningThreadMutex);
+ if (!mJoiningThread) {
+ // Shutdown is already being resolved, so there's nothing for us to do.
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ joiningThread = mJoiningThread.forget();
+ mThreadLeaked = true;
+ }
+
+ MOZ_DIAGNOSTIC_ASSERT(joiningThread->IsOnCurrentThread());
+
+ MarkCompleted();
+
+ return NS_OK;
+}
+
+void nsThreadShutdownContext::MarkCompleted() {
+ MOZ_ASSERT(!mCompleted);
+ mCompleted = true;
+ nsTArray<nsCOMPtr<nsIRunnable>> callbacks(std::move(mCompletionCallbacks));
+ for (auto& callback : callbacks) {
+ callback->Run();
+ }
+}
+
+namespace mozilla {
+PerformanceCounterState::Snapshot PerformanceCounterState::RunnableWillRun(
+ PerformanceCounter* aCounter, TimeStamp aNow, bool aIsIdleRunnable) {
+ if (IsNestedRunnable()) {
+ // Flush out any accumulated time that should be accounted to the
+ // current runnable before we start running a nested runnable.
+ MaybeReportAccumulatedTime("nested runnable"_ns, aNow);
+ }
+
+ Snapshot snapshot(mCurrentEventLoopDepth, mCurrentPerformanceCounter,
+ mCurrentRunnableIsIdleRunnable);
+
+ mCurrentEventLoopDepth = mNestedEventLoopDepth;
+ mCurrentPerformanceCounter = aCounter;
+ mCurrentRunnableIsIdleRunnable = aIsIdleRunnable;
+ mCurrentTimeSliceStart = aNow;
+
+ return snapshot;
+}
+
+void PerformanceCounterState::RunnableDidRun(const nsCString& aName,
+ Snapshot&& aSnapshot) {
+ // First thing: Restore our mCurrentEventLoopDepth so we can use
+ // IsNestedRunnable().
+ mCurrentEventLoopDepth = aSnapshot.mOldEventLoopDepth;
+
+ // We may not need the current timestamp; don't bother computing it if we
+ // don't.
+ TimeStamp now;
+ if (mCurrentPerformanceCounter || mIsMainThread || IsNestedRunnable()) {
+ now = TimeStamp::Now();
+ }
+ if (mCurrentPerformanceCounter || mIsMainThread) {
+ MaybeReportAccumulatedTime(aName, now);
+ }
+
+ // And now restore the rest of our state.
+ mCurrentPerformanceCounter = std::move(aSnapshot.mOldPerformanceCounter);
+ mCurrentRunnableIsIdleRunnable = aSnapshot.mOldIsIdleRunnable;
+ if (IsNestedRunnable()) {
+ // Reset mCurrentTimeSliceStart to right now, so our parent runnable's
+ // next slice can be properly accounted for.
+ mCurrentTimeSliceStart = now;
+ } else {
+ // We are done at the outermost level; we are no longer in a timeslice.
+ mCurrentTimeSliceStart = TimeStamp();
+ }
+}
+
+void PerformanceCounterState::MaybeReportAccumulatedTime(const nsCString& aName,
+ TimeStamp aNow) {
+ MOZ_ASSERT(mCurrentTimeSliceStart,
+ "How did we get here if we're not in a timeslice?");
+
+ if (!mCurrentPerformanceCounter && !mIsMainThread) {
+ // No one cares about this timeslice.
+ return;
+ }
+
+ TimeDuration duration = aNow - mCurrentTimeSliceStart;
+ if (mCurrentPerformanceCounter) {
+ mCurrentPerformanceCounter->IncrementExecutionDuration(
+ duration.ToMicroseconds());
+ }
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ if (mIsMainThread && duration.ToMilliseconds() > LONGTASK_TELEMETRY_MS) {
+ Telemetry::Accumulate(Telemetry::EVENT_LONGTASK, aName,
+ duration.ToMilliseconds());
+ }
+#endif
+
+ // Long tasks only matter on the main thread.
+ if (mIsMainThread && duration.ToMilliseconds() > LONGTASK_BUSY_WINDOW_MS) {
+ // Idle events (gc...) don't *really* count here
+ if (!mCurrentRunnableIsIdleRunnable) {
+ mLastLongNonIdleTaskEnd = aNow;
+ }
+ mLastLongTaskEnd = aNow;
+
+ if (profiler_thread_is_being_profiled_for_markers()) {
+ struct LongTaskMarker {
+ static constexpr Span<const char> MarkerTypeName() {
+ return MakeStringSpan("MainThreadLongTask");
+ }
+ static void StreamJSONMarkerData(
+ baseprofiler::SpliceableJSONWriter& aWriter) {
+ aWriter.StringProperty("category", "LongTask");
+ }
+ static MarkerSchema MarkerTypeDisplay() {
+ using MS = MarkerSchema;
+ MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
+ schema.AddKeyLabelFormatSearchable("category", "Type",
+ MS::Format::String,
+ MS::Searchable::Searchable);
+ return schema;
+ }
+ };
+
+ profiler_add_marker(mCurrentRunnableIsIdleRunnable
+ ? ProfilerString8View("LongIdleTask")
+ : ProfilerString8View("LongTask"),
+ geckoprofiler::category::OTHER,
+ MarkerTiming::Interval(mCurrentTimeSliceStart, aNow),
+ LongTaskMarker{});
+ }
+ }
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/nsThread.h b/xpcom/threads/nsThread.h
new file mode 100644
index 0000000000..e4b0eece51
--- /dev/null
+++ b/xpcom/threads/nsThread.h
@@ -0,0 +1,400 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThread_h__
+#define nsThread_h__
+
+#include "MainThreadUtils.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DataMutex.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/LinkedList.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/NotNull.h"
+#include "mozilla/PerformanceCounter.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/UniquePtr.h"
+#include "nsIDirectTaskDispatcher.h"
+#include "nsIEventTarget.h"
+#include "nsISerialEventTarget.h"
+#include "nsISupportsPriority.h"
+#include "nsIThread.h"
+#include "nsIThreadInternal.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+class CycleCollectedJSContext;
+class DelayedRunnable;
+class SynchronizedEventQueue;
+class ThreadEventQueue;
+class ThreadEventTarget;
+
+template <typename T, size_t Length>
+class Array;
+} // namespace mozilla
+
+using mozilla::NotNull;
+
+class nsIRunnable;
+class nsThreadEnumerator;
+class nsThreadShutdownContext;
+
+// See https://www.w3.org/TR/longtasks
+#define LONGTASK_BUSY_WINDOW_MS 50
+
+// Time a Runnable executes before we accumulate telemetry on it
+#define LONGTASK_TELEMETRY_MS 30
+
+// A class for managing performance counter state.
+namespace mozilla {
+class PerformanceCounterState {
+ public:
+ explicit PerformanceCounterState(const uint32_t& aNestedEventLoopDepthRef,
+ bool aIsMainThread)
+ : mNestedEventLoopDepth(aNestedEventLoopDepthRef),
+ mIsMainThread(aIsMainThread),
+ // Does it really make sense to initialize these to "now" when we
+ // haven't run any tasks?
+ mLastLongTaskEnd(TimeStamp::Now()),
+ mLastLongNonIdleTaskEnd(mLastLongTaskEnd) {}
+
+ class Snapshot {
+ public:
+ Snapshot(uint32_t aOldEventLoopDepth, PerformanceCounter* aCounter,
+ bool aOldIsIdleRunnable)
+ : mOldEventLoopDepth(aOldEventLoopDepth),
+ mOldPerformanceCounter(aCounter),
+ mOldIsIdleRunnable(aOldIsIdleRunnable) {}
+
+ Snapshot(const Snapshot&) = default;
+ Snapshot(Snapshot&&) = default;
+
+ private:
+ friend class PerformanceCounterState;
+
+ const uint32_t mOldEventLoopDepth;
+ // Non-const so we can move out of it and avoid the extra refcounting.
+ RefPtr<PerformanceCounter> mOldPerformanceCounter;
+ const bool mOldIsIdleRunnable;
+ };
+
+ // Notification that a runnable is about to run. This captures a snapshot of
+ // our current state before we reset to prepare for the new runnable. This
+  // must be called after mNestedEventLoopDepth has been incremented for the
+ // runnable execution. The performance counter passed in should be the one
+ // for the relevant runnable and may be null. aIsIdleRunnable should be true
+ // if and only if the runnable has idle priority.
+  Snapshot RunnableWillRun(PerformanceCounter* aCounter, TimeStamp aNow,
+ bool aIsIdleRunnable);
+
+ // Notification that a runnable finished executing. This must be passed the
+ // snapshot that RunnableWillRun returned for the same runnable. This must be
+ // called before mNestedEventLoopDepth is decremented after the runnable's
+ // execution.
+ void RunnableDidRun(const nsCString& aName, Snapshot&& aSnapshot);
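+  //
+  // Typical pairing of the two calls above (sketch; `state`, `counter`,
+  // `isIdle`, and `name` are illustrative locals; see
+  // nsThread::ProcessNextEvent for the real call sites):
+  //   Snapshot snapshot =
+  //       state.RunnableWillRun(counter, TimeStamp::Now(), isIdle);
+  //   event->Run();
+  //   state.RunnableDidRun(name, std::move(snapshot));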
+
+ const TimeStamp& LastLongTaskEnd() const { return mLastLongTaskEnd; }
+ const TimeStamp& LastLongNonIdleTaskEnd() const {
+ return mLastLongNonIdleTaskEnd;
+ }
+
+ private:
+ // Called to report accumulated time, as needed, when we're about to run a
+ // runnable or just finished running one.
+ void MaybeReportAccumulatedTime(const nsCString& aName, TimeStamp aNow);
+
+ // Whether the runnable we are about to run, or just ran, is a nested
+ // runnable, in the sense that there is some other runnable up the stack
+ // spinning the event loop. This must be called before we change our
+ // mCurrentEventLoopDepth (when about to run a new event) or after we restore
+ // it (after we ran one).
+ bool IsNestedRunnable() const {
+ return mNestedEventLoopDepth > mCurrentEventLoopDepth;
+ }
+
+ // The event loop depth of the currently running runnable. Set to the max
+ // value of a uint32_t when there is no runnable running, so when starting to
+ // run a toplevel (not nested) runnable IsNestedRunnable() will test false.
+ uint32_t mCurrentEventLoopDepth = std::numeric_limits<uint32_t>::max();
+
+ // A reference to the nsThread's mNestedEventLoopDepth, so we can
+ // see what it is right now.
+ const uint32_t& mNestedEventLoopDepth;
+
+ // A boolean that indicates whether the currently running runnable is an idle
+ // runnable. Only has a useful value between RunnableWillRun() being called
+ // and RunnableDidRun() returning.
+ bool mCurrentRunnableIsIdleRunnable = false;
+
+ // Whether we're attached to the mainthread nsThread.
+ const bool mIsMainThread;
+
+ // The timestamp from which time to be accounted for should be measured. This
+ // can be the start of a runnable running or the end of a nested runnable
+ // running.
+ TimeStamp mCurrentTimeSliceStart;
+
+ // Information about when long tasks last ended.
+ TimeStamp mLastLongTaskEnd;
+ TimeStamp mLastLongNonIdleTaskEnd;
+
+ // The performance counter to use for accumulating the runtime of
+ // the currently running event. May be null, in which case the
+ // event's running time should not be accounted to any performance
+ // counters.
+ RefPtr<PerformanceCounter> mCurrentPerformanceCounter;
+};
+} // namespace mozilla
+
+// A native thread
+class nsThread : public nsIThreadInternal,
+ public nsISupportsPriority,
+ public nsIDirectTaskDispatcher,
+ private mozilla::LinkedListElement<nsThread> {
+ friend mozilla::LinkedList<nsThread>;
+ friend mozilla::LinkedListElement<nsThread>;
+
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET_FULL
+ NS_DECL_NSITHREAD
+ NS_DECL_NSITHREADINTERNAL
+ NS_DECL_NSISUPPORTSPRIORITY
+ NS_DECL_NSIDIRECTTASKDISPATCHER
+
+ enum MainThreadFlag { MAIN_THREAD, NOT_MAIN_THREAD };
+
+ nsThread(NotNull<mozilla::SynchronizedEventQueue*> aQueue,
+ MainThreadFlag aMainThread,
+ nsIThreadManager::ThreadCreationOptions aOptions);
+
+ private:
+ nsThread();
+
+ public:
+ // Initialize this as a named wrapper for a new PRThread.
+ nsresult Init(const nsACString& aName);
+
+ // Initialize this as a wrapper for the current PRThread.
+ nsresult InitCurrentThread();
+
+ // Get this thread's name, thread-safe.
+ void GetThreadName(nsACString& aNameBuffer);
+
+ // Set this thread's name. Consider using
+ // NS_SetCurrentThreadName if you are not sure.
+ void SetThreadNameInternal(const nsACString& aName);
+
+ private:
+ // Initializes the mThreadId and stack base/size members, and adds the thread
+ // to the ThreadList().
+ void InitCommon();
+
+ public:
+ // The PRThread corresponding to this thread.
+ PRThread* GetPRThread() const { return mThread; }
+
+ const void* StackBase() const { return mStackBase; }
+ size_t StackSize() const { return mStackSize; }
+
+ uint32_t ThreadId() const { return mThreadId; }
+
+ // If this flag is true, then the nsThread was created using
+ // nsIThreadManager::NewThread.
+ bool ShutdownRequired() { return mShutdownRequired; }
+
+ // Lets GetRunningEventDelay() determine if the pool this is part
+ // of has an unstarted thread
+ void SetPoolThreadFreePtr(mozilla::Atomic<bool, mozilla::Relaxed>* aPtr) {
+ mIsAPoolThreadFree = aPtr;
+ }
+
+ void SetScriptObserver(mozilla::CycleCollectedJSContext* aScriptObserver);
+
+ uint32_t RecursionDepth() const;
+
+ void ShutdownComplete(NotNull<nsThreadShutdownContext*> aContext);
+
+ void WaitForAllAsynchronousShutdowns();
+
+ static const uint32_t kRunnableNameBufSize = 1000;
+ static mozilla::Array<char, kRunnableNameBufSize> sMainThreadRunnableName;
+
+ mozilla::SynchronizedEventQueue* EventQueue() { return mEvents.get(); }
+
+ bool ShuttingDown() const { return mShutdownContext != nullptr; }
+
+ static bool GetLabeledRunnableName(nsIRunnable* aEvent, nsACString& aName,
+ mozilla::EventQueuePriority aPriority);
+
+ virtual mozilla::PerformanceCounter* GetPerformanceCounter(
+ nsIRunnable* aEvent) const;
+
+ static mozilla::PerformanceCounter* GetPerformanceCounterBase(
+ nsIRunnable* aEvent);
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+
+ // Returns the size of this object, its PRThread, and its shutdown contexts,
+ // but excluding its event queues.
+ size_t ShallowSizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+
+ size_t SizeOfEventQueues(mozilla::MallocSizeOf aMallocSizeOf) const;
+
+ static nsThreadEnumerator Enumerate();
+
+ void SetUseHangMonitor(bool aValue) {
+ MOZ_ASSERT(IsOnCurrentThread());
+ mUseHangMonitor = aValue;
+ }
+
+ private:
+ void DoMainThreadSpecificProcessing() const;
+
+ protected:
+ friend class nsThreadShutdownEvent;
+
+ friend class nsThreadEnumerator;
+
+ virtual ~nsThread();
+
+ static void ThreadFunc(void* aArg);
+
+ // Helper
+ already_AddRefed<nsIThreadObserver> GetObserver() {
+ nsIThreadObserver* obs;
+ nsThread::GetObserver(&obs);
+ return already_AddRefed<nsIThreadObserver>(obs);
+ }
+
+ already_AddRefed<nsThreadShutdownContext> ShutdownInternal(bool aSync);
+
+ friend class nsThreadManager;
+ friend class nsThreadPool;
+
+ static mozilla::OffTheBooksMutex& ThreadListMutex();
+ static mozilla::LinkedList<nsThread>& ThreadList();
+
+ void AddToThreadList();
+ void MaybeRemoveFromThreadList();
+
+ // Whether or not these members have a value determines whether the nsThread
+ // is treated as a full XPCOM thread or as a thin wrapper.
+ //
+ // For full nsThreads, they will always contain valid pointers. For thin
+ // wrappers around non-XPCOM threads, they will be null, and event dispatch
+ // methods which rely on them will fail (and assert) if called.
+ RefPtr<mozilla::SynchronizedEventQueue> mEvents;
+ RefPtr<mozilla::ThreadEventTarget> mEventTarget;
+
+ // The number of outstanding nsThreadShutdownContext started by this thread.
+ // The thread will not be allowed to exit until this number reaches 0.
+ uint32_t mOutstandingShutdownContexts;
+ // The shutdown context for ourselves.
+ RefPtr<nsThreadShutdownContext> mShutdownContext;
+
+ mozilla::CycleCollectedJSContext* mScriptObserver;
+
+ // Our name.
+ mozilla::DataMutex<nsCString> mThreadName;
+
+ void* mStackBase = nullptr;
+ uint32_t mStackSize;
+ uint32_t mThreadId;
+
+ uint32_t mNestedEventLoopDepth;
+
+ mozilla::Atomic<bool> mShutdownRequired;
+
+ int8_t mPriority;
+
+ const bool mIsMainThread;
+ bool mUseHangMonitor;
+ const bool mIsUiThread;
+ mozilla::Atomic<bool, mozilla::Relaxed>* mIsAPoolThreadFree;
+
+ // Set to true if this thread creates a JSRuntime.
+ bool mCanInvokeJS;
+
+ // The time the currently running event spent in event queues, and
+ // when it started running. If no event is running, they are
+ // TimeDuration() & TimeStamp().
+ mozilla::TimeDuration mLastEventDelay;
+ mozilla::TimeStamp mLastEventStart;
+
+#ifdef EARLY_BETA_OR_EARLIER
+ nsCString mNameForWakeupTelemetry;
+ mozilla::TimeStamp mLastWakeupCheckTime;
+ uint32_t mWakeupCount = 0;
+#endif
+
+ mozilla::PerformanceCounterState mPerformanceCounterState;
+
+ mozilla::SimpleTaskQueue mDirectTasks;
+};
+
+class nsThreadShutdownContext final : public nsIThreadShutdown {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSITHREADSHUTDOWN
+
+ private:
+ friend class nsThread;
+ friend class nsThreadShutdownEvent;
+ friend class nsThreadShutdownAckEvent;
+
+ nsThreadShutdownContext(NotNull<nsThread*> aTerminatingThread,
+ nsThread* aJoiningThread)
+ : mTerminatingThread(aTerminatingThread),
+ mTerminatingPRThread(aTerminatingThread->GetPRThread()),
+ mJoiningThreadMutex("nsThreadShutdownContext::mJoiningThreadMutex"),
+ mJoiningThread(aJoiningThread) {}
+
+ ~nsThreadShutdownContext() = default;
+
+ // Must be called on the joining thread.
+ void MarkCompleted();
+
+ // NB: This may be the last reference.
+ NotNull<RefPtr<nsThread>> const mTerminatingThread;
+ PRThread* const mTerminatingPRThread;
+
+ // May only be accessed on the joining thread.
+ bool mCompleted = false;
+ nsTArray<nsCOMPtr<nsIRunnable>> mCompletionCallbacks;
+
+ // The thread waiting for this thread to shut down. Will either be cleared by
+ // the joining thread if `StopWaitingAndLeakThread` is called or by the
+ // terminating thread upon exiting and notifying the joining thread.
+ mozilla::Mutex mJoiningThreadMutex;
+ RefPtr<nsThread> mJoiningThread MOZ_GUARDED_BY(mJoiningThreadMutex);
+ bool mThreadLeaked MOZ_GUARDED_BY(mJoiningThreadMutex) = false;
+};
+
+class MOZ_STACK_CLASS nsThreadEnumerator final {
+ public:
+ nsThreadEnumerator() = default;
+
+ auto begin() { return nsThread::ThreadList().begin(); }
+ auto end() { return nsThread::ThreadList().end(); }
+
+ private:
+ mozilla::OffTheBooksMutexAutoLock mMal{nsThread::ThreadListMutex()};
+};
+
+#if defined(XP_UNIX) && !defined(ANDROID) && !defined(DEBUG) && HAVE_UALARM && \
+ defined(_GNU_SOURCE)
+# define MOZ_CANARY
+
+extern int sCanaryOutputFD;
+#endif
+
+#endif // nsThread_h__
diff --git a/xpcom/threads/nsThreadManager.cpp b/xpcom/threads/nsThreadManager.cpp
new file mode 100644
index 0000000000..367b520a54
--- /dev/null
+++ b/xpcom/threads/nsThreadManager.cpp
@@ -0,0 +1,798 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThreadManager.h"
+#include "nsThread.h"
+#include "nsThreadPool.h"
+#include "nsThreadUtils.h"
+#include "nsIClassInfoImpl.h"
+#include "nsExceptionHandler.h"
+#include "nsTArray.h"
+#include "nsXULAppAPI.h"
+#include "nsExceptionHandler.h"
+#include "mozilla/AbstractThread.h"
+#include "mozilla/AppShutdown.h"
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/CycleCollectedJSContext.h" // nsAutoMicroTask
+#include "mozilla/EventQueue.h"
+#include "mozilla/InputTaskManager.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/ProfilerMarkers.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/TaskQueue.h"
+#include "mozilla/ThreadEventQueue.h"
+#include "mozilla/ThreadLocal.h"
+#include "TaskController.h"
+#include "ThreadEventTarget.h"
+#ifdef MOZ_CANARY
+# include <fcntl.h>
+# include <unistd.h>
+#endif
+
+#include "MainThreadIdlePeriod.h"
+
+using namespace mozilla;
+
+static MOZ_THREAD_LOCAL(bool) sTLSIsMainThread;
+
+bool NS_IsMainThreadTLSInitialized() { return sTLSIsMainThread.initialized(); }
+
+class BackgroundEventTarget final : public nsIEventTarget,
+ public TaskQueueTracker {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET_FULL
+
+ BackgroundEventTarget() = default;
+
+ nsresult Init();
+
+ already_AddRefed<nsISerialEventTarget> CreateBackgroundTaskQueue(
+ const char* aName);
+
+ void BeginShutdown(nsTArray<RefPtr<ShutdownPromise>>&);
+ void FinishShutdown();
+
+ private:
+ ~BackgroundEventTarget() = default;
+
+ nsCOMPtr<nsIThreadPool> mPool;
+ nsCOMPtr<nsIThreadPool> mIOPool;
+};
+
+NS_IMPL_ISUPPORTS(BackgroundEventTarget, nsIEventTarget, TaskQueueTracker)
+
+nsresult BackgroundEventTarget::Init() {
+ nsCOMPtr<nsIThreadPool> pool(new nsThreadPool());
+ NS_ENSURE_TRUE(pool, NS_ERROR_FAILURE);
+
+ nsresult rv = pool->SetName("BackgroundThreadPool"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+  // Use a potentially more conservative stack size.
+ rv = pool->SetThreadStackSize(nsIThreadManager::kThreadPoolStackSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Thread limit of 2 makes deadlock during synchronous dispatch less likely.
+ rv = pool->SetThreadLimit(2);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = pool->SetIdleThreadLimit(1);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Leave threads alive for up to 5 minutes
+ rv = pool->SetIdleThreadTimeout(300000);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Initialize the background I/O event target.
+ nsCOMPtr<nsIThreadPool> ioPool(new nsThreadPool());
+  NS_ENSURE_TRUE(ioPool, NS_ERROR_FAILURE);
+
+  // The I/O pool spends a lot of its time blocking on I/O, so we want to run
+  // these jobs at a lower priority if available.
+ rv = ioPool->SetQoSForThreads(nsIThread::QOS_PRIORITY_LOW);
+ NS_ENSURE_SUCCESS(
+ rv, rv); // note: currently infallible, keeping this for brevity.
+
+ rv = ioPool->SetName("BgIOThreadPool"_ns);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+  // Use a potentially more conservative stack size.
+ rv = ioPool->SetThreadStackSize(nsIThreadManager::kThreadPoolStackSize);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Thread limit of 4 makes deadlock during synchronous dispatch less likely.
+ rv = ioPool->SetThreadLimit(4);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ rv = ioPool->SetIdleThreadLimit(1);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ // Leave threads alive for up to 5 minutes
+ rv = ioPool->SetIdleThreadTimeout(300000);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ pool.swap(mPool);
+ ioPool.swap(mIOPool);
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP_(bool)
+BackgroundEventTarget::IsOnCurrentThreadInfallible() {
+ return mPool->IsOnCurrentThread() || mIOPool->IsOnCurrentThread();
+}
+
+NS_IMETHODIMP
+BackgroundEventTarget::IsOnCurrentThread(bool* aValue) {
+ bool value = false;
+ if (NS_SUCCEEDED(mPool->IsOnCurrentThread(&value)) && value) {
+ *aValue = value;
+ return NS_OK;
+ }
+ return mIOPool->IsOnCurrentThread(aValue);
+}
+
+NS_IMETHODIMP
+BackgroundEventTarget::Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t aFlags) {
+ // We need to be careful here, because if an event is getting dispatched here
+ // from within TaskQueue::Runner::Run, it will be dispatched with
+ // NS_DISPATCH_AT_END, but we might not be running the event on the same
+ // pool, depending on which pool we were on and the dispatch flags. If we
+ // dispatch an event with NS_DISPATCH_AT_END to the wrong pool, the pool
+ // may not process the event in a timely fashion, which can lead to deadlock.
+ uint32_t flags = aFlags & ~NS_DISPATCH_EVENT_MAY_BLOCK;
+ bool mayBlock = bool(aFlags & NS_DISPATCH_EVENT_MAY_BLOCK);
+ nsCOMPtr<nsIThreadPool>& pool = mayBlock ? mIOPool : mPool;
+
+ // If we're already running on the pool we want to dispatch to, we can
+ // unconditionally add NS_DISPATCH_AT_END to indicate that we shouldn't spin
+ // up a new thread.
+ //
+ // Otherwise, we should remove NS_DISPATCH_AT_END so we don't run into issues
+ // like those in the above comment.
+ if (pool->IsOnCurrentThread()) {
+ flags |= NS_DISPATCH_AT_END;
+ } else {
+ flags &= ~NS_DISPATCH_AT_END;
+ }
+
+ return pool->Dispatch(std::move(aRunnable), flags);
+}
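+
+// Worked example of the flag handling above: a runnable dispatched from a
+// TaskQueue runner on mPool with (NS_DISPATCH_AT_END |
+// NS_DISPATCH_EVENT_MAY_BLOCK) set is routed to mIOPool, and since the caller
+// is not on mIOPool, NS_DISPATCH_AT_END is stripped so that the I/O pool may
+// spin up a new thread to service the event promptly.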
+
+NS_IMETHODIMP
+BackgroundEventTarget::DispatchFromScript(nsIRunnable* aRunnable,
+ uint32_t aFlags) {
+ nsCOMPtr<nsIRunnable> runnable(aRunnable);
+ return Dispatch(runnable.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+BackgroundEventTarget::DelayedDispatch(already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t) {
+ nsCOMPtr<nsIRunnable> dropRunnable(aRunnable);
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+BackgroundEventTarget::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+BackgroundEventTarget::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+void BackgroundEventTarget::BeginShutdown(
+ nsTArray<RefPtr<ShutdownPromise>>& promises) {
+ auto queues = GetAllTrackedTaskQueues();
+ for (auto& queue : queues) {
+ promises.AppendElement(queue->BeginShutdown());
+ }
+}
+
+void BackgroundEventTarget::FinishShutdown() {
+ mPool->Shutdown();
+ mIOPool->Shutdown();
+}
+
+already_AddRefed<nsISerialEventTarget>
+BackgroundEventTarget::CreateBackgroundTaskQueue(const char* aName) {
+ return TaskQueue::Create(do_AddRef(this), aName).forget();
+}
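+
+// Illustrative use (the queue name is hypothetical): a caller can obtain a
+// serial target via
+//   nsCOMPtr<nsISerialEventTarget> target =
+//       nsThreadManager::get().CreateBackgroundTaskQueue("MyTaskQueue");
+// Dispatches to it run on the shared background thread pools, and the queue
+// is tracked via TaskQueueTracker so that BeginShutdown() above can collect
+// its shutdown promise.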
+
+extern "C" {
+// This uses the C language linkage because it's exposed to Rust
+// via the xpcom/rust/moz_task crate.
+bool NS_IsMainThread() { return sTLSIsMainThread.get(); }
+}
+
+void NS_SetMainThread() {
+ if (!sTLSIsMainThread.init()) {
+ MOZ_CRASH();
+ }
+ sTLSIsMainThread.set(true);
+ MOZ_ASSERT(NS_IsMainThread());
+ // We initialize the SerialEventTargetGuard's TLS here for simplicity as it
+ // needs to be initialized around the same time you would initialize
+ // sTLSIsMainThread.
+ SerialEventTargetGuard::InitTLS();
+ nsThreadPool::InitTLS();
+}
+
+#ifdef DEBUG
+
+namespace mozilla {
+
+void AssertIsOnMainThread() { MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); }
+
+} // namespace mozilla
+
+#endif
+
+typedef nsTArray<NotNull<RefPtr<nsThread>>> nsThreadArray;
+
+//-----------------------------------------------------------------------------
+
+/* static */
+void nsThreadManager::ReleaseThread(void* aData) {
+ static_cast<nsThread*>(aData)->Release();
+}
+
+// statically allocated instance
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadManager::AddRef() { return 2; }
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadManager::Release() { return 1; }
+NS_IMPL_CLASSINFO(nsThreadManager, nullptr,
+ nsIClassInfo::THREADSAFE | nsIClassInfo::SINGLETON,
+ NS_THREADMANAGER_CID)
+NS_IMPL_QUERY_INTERFACE_CI(nsThreadManager, nsIThreadManager)
+NS_IMPL_CI_INTERFACE_GETTER(nsThreadManager, nsIThreadManager)
+
+//-----------------------------------------------------------------------------
+
+/*static*/ nsThreadManager& nsThreadManager::get() {
+ static nsThreadManager sInstance;
+ return sInstance;
+}
+
+nsThreadManager::nsThreadManager()
+ : mCurThreadIndex(0), mMainPRThread(nullptr), mInitialized(false) {}
+
+nsThreadManager::~nsThreadManager() = default;
+
+nsresult nsThreadManager::Init() {
+ // Child processes need to initialize the thread manager before they
+ // initialize XPCOM in order to set up the crash reporter. This leads to
+ // situations where we get initialized twice.
+ if (mInitialized) {
+ return NS_OK;
+ }
+
+ if (PR_NewThreadPrivateIndex(&mCurThreadIndex, ReleaseThread) == PR_FAILURE) {
+ return NS_ERROR_FAILURE;
+ }
+
+#ifdef MOZ_CANARY
+ const int flags = O_WRONLY | O_APPEND | O_CREAT | O_NONBLOCK;
+ const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ char* env_var_flag = getenv("MOZ_KILL_CANARIES");
+ sCanaryOutputFD =
+ env_var_flag
+ ? (env_var_flag[0] ? open(env_var_flag, flags, mode) : STDERR_FILENO)
+ : 0;
+#endif
+
+ TaskController::Initialize();
+
+ // Initialize idle handling.
+ nsCOMPtr<nsIIdlePeriod> idlePeriod = new MainThreadIdlePeriod();
+ TaskController::Get()->SetIdleTaskManager(
+ new IdleTaskManager(idlePeriod.forget()));
+
+ // Create main thread queue that forwards events to TaskController and
+ // construct main thread.
+ UniquePtr<EventQueue> queue = MakeUnique<EventQueue>(true);
+
+ RefPtr<ThreadEventQueue> synchronizedQueue =
+ new ThreadEventQueue(std::move(queue), true);
+
+ mMainThread = new nsThread(WrapNotNull(synchronizedQueue),
+ nsThread::MAIN_THREAD, {.stackSize = 0});
+
+ nsresult rv = mMainThread->InitCurrentThread();
+ if (NS_FAILED(rv)) {
+ mMainThread = nullptr;
+ return rv;
+ }
+
+ // We need to keep a pointer to the current thread, so we can satisfy
+ // GetIsMainThread calls that occur post-Shutdown.
+ mMainThread->GetPRThread(&mMainPRThread);
+
+ // Init AbstractThread.
+ AbstractThread::InitTLS();
+ AbstractThread::InitMainThread();
+
+ // Initialize the background event target.
+ RefPtr<BackgroundEventTarget> target(new BackgroundEventTarget());
+
+ rv = target->Init();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ mBackgroundEventTarget = std::move(target);
+
+ mInitialized = true;
+
+ return NS_OK;
+}
+
+void nsThreadManager::ShutdownNonMainThreads() {
+ MOZ_ASSERT(NS_IsMainThread(), "shutdown not called from main thread");
+
+ // Prevent further access to the thread manager (no more new threads!)
+ //
+ // What happens if shutdown happens before NewThread completes?
+  // We Shutdown() the new thread and return an error if Shutdown() was
+  // started between when NewThread began and when the thread finished
+  // initializing and registering with the ThreadManager.
+ //
+ mInitialized = false;
+
+ // Empty the main thread event queue before we begin shutting down threads.
+ NS_ProcessPendingEvents(mMainThread);
+
+ mMainThread->mEvents->RunShutdownTasks();
+
+ nsTArray<RefPtr<ShutdownPromise>> promises;
+ mBackgroundEventTarget->BeginShutdown(promises);
+
+ bool taskQueuesShutdown = false;
+ // It's fine to capture everything by reference in the Then handler since it
+ // runs before we exit the nested event loop, thanks to the SpinEventLoopUntil
+ // below.
+ ShutdownPromise::All(mMainThread, promises)->Then(mMainThread, __func__, [&] {
+ mBackgroundEventTarget->FinishShutdown();
+ taskQueuesShutdown = true;
+ });
+
+  // Wait for the task queues to shut down, so we don't shut down the
+  // underlying threads of the background event target in the block below,
+  // thereby preventing the task queues from emptying, preventing the shutdown
+  // promises from resolving, and preventing anything checking
+  // `taskQueuesShutdown` from working.
+ mozilla::SpinEventLoopUntil(
+ "nsThreadManager::Shutdown"_ns, [&]() { return taskQueuesShutdown; },
+ mMainThread);
+
+ {
+ // We gather the threads into a list, so that we avoid holding the
+ // enumerator lock while calling nsIThread::Shutdown.
+ nsTArray<RefPtr<nsThread>> threadsToShutdown;
+ for (auto* thread : nsThread::Enumerate()) {
+ if (thread->ShutdownRequired()) {
+ threadsToShutdown.AppendElement(thread);
+ }
+ }
+
+ // It's tempting to walk the list of threads here and tell them each to stop
+ // accepting new events, but that could lead to badness if one of those
+ // threads is stuck waiting for a response from another thread. To do it
+ // right, we'd need some way to interrupt the threads.
+ //
+ // Instead, we process events on the current thread while waiting for
+ // threads to shutdown. This means that we have to preserve a mostly
+ // functioning world until such time as the threads exit.
+
+    // Shut down all threads that require it (join with threads that we
+    // created).
+ for (auto& thread : threadsToShutdown) {
+ thread->Shutdown();
+ }
+ }
+
+ // NB: It's possible that there are events in the queue that want to *start*
+ // an asynchronous shutdown. But we have already shutdown the threads above,
+ // so there's no need to worry about them. We only have to wait for all
+ // in-flight asynchronous thread shutdowns to complete.
+ mMainThread->WaitForAllAsynchronousShutdowns();
+
+ // There are no more background threads at this point.
+}
+
+void nsThreadManager::ShutdownMainThread() {
+ MOZ_ASSERT(!mInitialized, "Must have called BeginShutdown");
+
+ // Do NS_ProcessPendingEvents but with special handling to set
+ // mEventsAreDoomed atomically with the removal of the last event. This means
+ // that PutEvent cannot succeed if the event would be left in the main thread
+ // queue after our final call to NS_ProcessPendingEvents.
+ // See comments in `nsThread::ThreadFunc` for a more detailed explanation.
+ while (true) {
+ if (mMainThread->mEvents->ShutdownIfNoPendingEvents()) {
+ break;
+ }
+ NS_ProcessPendingEvents(mMainThread);
+ }
+
+ // Normally thread shutdown clears the observer for the thread, but since the
+ // main thread is special we do it manually here after we're sure all events
+ // have been processed.
+ mMainThread->SetObserver(nullptr);
+
+ mBackgroundEventTarget = nullptr;
+}
+
+void nsThreadManager::ReleaseMainThread() {
+ MOZ_ASSERT(!mInitialized, "Must have called BeginShutdown");
+ MOZ_ASSERT(!mBackgroundEventTarget, "Must have called ShutdownMainThread");
+ MOZ_ASSERT(mMainThread);
+
+ // Release main thread object.
+ mMainThread = nullptr;
+
+ // Remove the TLS entry for the main thread.
+ PR_SetThreadPrivate(mCurThreadIndex, nullptr);
+}
+
+void nsThreadManager::RegisterCurrentThread(nsThread& aThread) {
+ MOZ_ASSERT(aThread.GetPRThread() == PR_GetCurrentThread(), "bad aThread");
+
+ aThread.AddRef(); // for TLS entry
+ PR_SetThreadPrivate(mCurThreadIndex, &aThread);
+}
+
+void nsThreadManager::UnregisterCurrentThread(nsThread& aThread) {
+ MOZ_ASSERT(aThread.GetPRThread() == PR_GetCurrentThread(), "bad aThread");
+
+ PR_SetThreadPrivate(mCurThreadIndex, nullptr);
+ // Ref-count balanced via ReleaseThread
+}
+
+nsThread* nsThreadManager::CreateCurrentThread(
+ SynchronizedEventQueue* aQueue, nsThread::MainThreadFlag aMainThread) {
+ // Make sure we don't have an nsThread yet.
+ MOZ_ASSERT(!PR_GetThreadPrivate(mCurThreadIndex));
+
+ if (!mInitialized) {
+ return nullptr;
+ }
+
+ RefPtr<nsThread> thread =
+ new nsThread(WrapNotNull(aQueue), aMainThread, {.stackSize = 0});
+ if (!thread || NS_FAILED(thread->InitCurrentThread())) {
+ return nullptr;
+ }
+
+ return thread.get(); // reference held in TLS
+}
+
+nsresult nsThreadManager::DispatchToBackgroundThread(nsIRunnable* aEvent,
+ uint32_t aDispatchFlags) {
+ if (!mInitialized) {
+ return NS_ERROR_FAILURE;
+ }
+
+ nsCOMPtr<nsIEventTarget> backgroundTarget(mBackgroundEventTarget);
+ return backgroundTarget->Dispatch(aEvent, aDispatchFlags);
+}
+
+already_AddRefed<nsISerialEventTarget>
+nsThreadManager::CreateBackgroundTaskQueue(const char* aName) {
+ if (!mInitialized) {
+ return nullptr;
+ }
+
+ return mBackgroundEventTarget->CreateBackgroundTaskQueue(aName);
+}
+
+nsThread* nsThreadManager::GetCurrentThread() {
+ // read thread local storage
+ void* data = PR_GetThreadPrivate(mCurThreadIndex);
+ if (data) {
+ return static_cast<nsThread*>(data);
+ }
+
+ if (!mInitialized) {
+ return nullptr;
+ }
+
+ // OK, that's fine. We'll dynamically create one :-)
+ //
+ // We assume that if we're implicitly creating a thread here that it doesn't
+ // want an event queue. Any thread which wants an event queue should
+ // explicitly create its nsThread wrapper.
+ RefPtr<nsThread> thread = new nsThread();
+ if (!thread || NS_FAILED(thread->InitCurrentThread())) {
+ return nullptr;
+ }
+
+ return thread.get(); // reference held in TLS
+}
+
+bool nsThreadManager::IsNSThread() const {
+ if (!mInitialized) {
+ return false;
+ }
+ if (auto* thread = (nsThread*)PR_GetThreadPrivate(mCurThreadIndex)) {
+ return thread->EventQueue();
+ }
+ return false;
+}
+
+NS_IMETHODIMP
+nsThreadManager::NewNamedThread(
+ const nsACString& aName, nsIThreadManager::ThreadCreationOptions aOptions,
+ nsIThread** aResult) {
+ // Note: can be called from arbitrary threads
+
+ // No new threads during Shutdown
+ if (NS_WARN_IF(!mInitialized)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ [[maybe_unused]] TimeStamp startTime = TimeStamp::Now();
+
+ RefPtr<ThreadEventQueue> queue =
+ new ThreadEventQueue(MakeUnique<EventQueue>());
+ RefPtr<nsThread> thr =
+ new nsThread(WrapNotNull(queue), nsThread::NOT_MAIN_THREAD, aOptions);
+ nsresult rv =
+ thr->Init(aName); // Note: blocks until the new thread has been set up
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // At this point, we expect that the thread has been registered in
+ // mThreadByPRThread; however, it is possible that it could have also been
+ // replaced by now, so we cannot really assert that it was added. Instead,
+ // kill it if we entered Shutdown() during/before Init()
+
+ if (NS_WARN_IF(!mInitialized)) {
+ if (thr->ShutdownRequired()) {
+ thr->Shutdown(); // ok if it happens multiple times
+ }
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ PROFILER_MARKER_TEXT(
+ "NewThread", OTHER,
+ MarkerOptions(MarkerStack::Capture(),
+ MarkerTiming::IntervalUntilNowFrom(startTime)),
+ aName);
+ if (!NS_IsMainThread()) {
+ PROFILER_MARKER_TEXT(
+ "NewThread (non-main thread)", OTHER,
+ MarkerOptions(MarkerStack::Capture(), MarkerThreadId::MainThread(),
+ MarkerTiming::IntervalUntilNowFrom(startTime)),
+ aName);
+ }
+
+ thr.forget(aResult);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::GetMainThread(nsIThread** aResult) {
+ // Keep this functioning during Shutdown
+ if (!mMainThread) {
+ if (!NS_IsMainThread()) {
+ NS_WARNING(
+ "Called GetMainThread but there isn't a main thread and "
+ "we're not the main thread.");
+ }
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ NS_ADDREF(*aResult = mMainThread);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::GetCurrentThread(nsIThread** aResult) {
+ // Keep this functioning during Shutdown
+ if (!mMainThread) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ *aResult = GetCurrentThread();
+ if (!*aResult) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ NS_ADDREF(*aResult);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::SpinEventLoopUntil(const nsACString& aVeryGoodReasonToDoThis,
+ nsINestedEventLoopCondition* aCondition) {
+ return SpinEventLoopUntilInternal(aVeryGoodReasonToDoThis, aCondition,
+ ShutdownPhase::NotInShutdown);
+}
+
+NS_IMETHODIMP
+nsThreadManager::SpinEventLoopUntilOrQuit(
+ const nsACString& aVeryGoodReasonToDoThis,
+ nsINestedEventLoopCondition* aCondition) {
+ return SpinEventLoopUntilInternal(aVeryGoodReasonToDoThis, aCondition,
+ ShutdownPhase::AppShutdownConfirmed);
+}
+
+// statics from SpinEventLoopUntil.h
+AutoNestedEventLoopAnnotation* AutoNestedEventLoopAnnotation::sCurrent =
+ nullptr;
+StaticMutex AutoNestedEventLoopAnnotation::sStackMutex;
+
+// static from SpinEventLoopUntil.h
+void AutoNestedEventLoopAnnotation::AnnotateXPCOMSpinEventLoopStack(
+ const nsACString& aStack) {
+ if (aStack.Length() > 0) {
+ nsCString prefixedStack(XRE_GetProcessTypeString());
+ prefixedStack += ": "_ns + aStack;
+ CrashReporter::AnnotateCrashReport(
+ CrashReporter::Annotation::XPCOMSpinEventLoopStack, prefixedStack);
+ } else {
+ CrashReporter::AnnotateCrashReport(
+ CrashReporter::Annotation::XPCOMSpinEventLoopStack, ""_ns);
+ }
+}
+
+nsresult nsThreadManager::SpinEventLoopUntilInternal(
+ const nsACString& aVeryGoodReasonToDoThis,
+ nsINestedEventLoopCondition* aCondition,
+ ShutdownPhase aShutdownPhaseToCheck) {
+ // XXX: We would want to AssertIsOnMainThread(); but that breaks some GTest.
+ nsCOMPtr<nsINestedEventLoopCondition> condition(aCondition);
+ nsresult rv = NS_OK;
+
+ if (!mozilla::SpinEventLoopUntil(aVeryGoodReasonToDoThis, [&]() -> bool {
+ // Check if an ongoing shutdown reached our limits.
+ if (aShutdownPhaseToCheck > ShutdownPhase::NotInShutdown &&
+ AppShutdown::GetCurrentShutdownPhase() >= aShutdownPhaseToCheck) {
+ return true;
+ }
+
+ bool isDone = false;
+ rv = condition->IsDone(&isDone);
+ // JS failure should be unusual, but we need to stop and propagate
+ // the error back to the caller.
+ if (NS_FAILED(rv)) {
+ return true;
+ }
+
+ return isDone;
+ })) {
+ // We stopped early for some reason, which is unexpected.
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // If we exited when the condition told us to, we need to return whether
+ // the condition encountered failure when executing.
+ return rv;
+}
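+
+// Illustrative sketch (not part of this patch): C++ callers normally use
+// mozilla::SpinEventLoopUntil directly instead of this scriptable wrapper,
+// e.g. to wait for an asynchronously-set flag:
+//
+//   bool done = false;
+//   StartAsyncWork([&] { done = true; });  // hypothetical async kick-off
+//   mozilla::SpinEventLoopUntil("example:wait-for-done"_ns,
+//                               [&] { return done; });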
+
+NS_IMETHODIMP
+nsThreadManager::SpinEventLoopUntilEmpty() {
+ nsIThread* thread = NS_GetCurrentThread();
+
+ while (NS_HasPendingEvents(thread)) {
+ (void)NS_ProcessNextEvent(thread, false);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::GetMainThreadEventTarget(nsIEventTarget** aTarget) {
+ nsCOMPtr<nsIEventTarget> target = GetMainThreadSerialEventTarget();
+ target.forget(aTarget);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::DispatchToMainThread(nsIRunnable* aEvent, uint32_t aPriority,
+ uint8_t aArgc) {
+ // Note: C++ callers should instead use NS_DispatchToMainThread.
+ MOZ_ASSERT(NS_IsMainThread());
+
+ // Keep this functioning during Shutdown
+ if (NS_WARN_IF(!mMainThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ // If aPriority wasn't explicitly passed, that means it should be treated as
+ // PRIORITY_NORMAL.
+ if (aArgc > 0 && aPriority != nsIRunnablePriority::PRIORITY_NORMAL) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return mMainThread->DispatchFromScript(
+ new PrioritizableRunnable(event.forget(), aPriority), 0);
+ }
+ return mMainThread->DispatchFromScript(aEvent, 0);
+}
+
+class AutoMicroTaskWrapperRunnable final : public Runnable {
+ public:
+ explicit AutoMicroTaskWrapperRunnable(nsIRunnable* aEvent)
+ : Runnable("AutoMicroTaskWrapperRunnable"), mEvent(aEvent) {
+ MOZ_ASSERT(aEvent);
+ }
+
+ private:
+ ~AutoMicroTaskWrapperRunnable() = default;
+
+ NS_IMETHOD Run() override {
+ nsAutoMicroTask mt;
+
+ return mEvent->Run();
+ }
+
+ RefPtr<nsIRunnable> mEvent;
+};
+
+NS_IMETHODIMP
+nsThreadManager::DispatchToMainThreadWithMicroTask(nsIRunnable* aEvent,
+ uint32_t aPriority,
+ uint8_t aArgc) {
+ RefPtr<AutoMicroTaskWrapperRunnable> runnable =
+ new AutoMicroTaskWrapperRunnable(aEvent);
+
+ return DispatchToMainThread(runnable, aPriority, aArgc);
+}
+
+void nsThreadManager::EnableMainThreadEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ InputTaskManager::Get()->EnableInputEventPrioritization();
+}
+
+void nsThreadManager::FlushInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ InputTaskManager::Get()->FlushInputEventPrioritization();
+}
+
+void nsThreadManager::SuspendInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ InputTaskManager::Get()->SuspendInputEventPrioritization();
+}
+
+void nsThreadManager::ResumeInputEventPrioritization() {
+ MOZ_ASSERT(NS_IsMainThread());
+ InputTaskManager::Get()->ResumeInputEventPrioritization();
+}
+
+// static
+bool nsThreadManager::MainThreadHasPendingHighPriorityEvents() {
+ MOZ_ASSERT(NS_IsMainThread());
+ bool retVal = false;
+ if (get().mMainThread) {
+ get().mMainThread->HasPendingHighPriorityEvents(&retVal);
+ }
+ return retVal;
+}
+
+NS_IMETHODIMP
+nsThreadManager::IdleDispatchToMainThread(nsIRunnable* aEvent,
+ uint32_t aTimeout) {
+ // Note: C++ callers should instead use NS_DispatchToThreadQueue or
+ // NS_DispatchToCurrentThreadQueue.
+ MOZ_ASSERT(NS_IsMainThread());
+
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ if (aTimeout) {
+ return NS_DispatchToThreadQueue(event.forget(), aTimeout, mMainThread,
+ EventQueuePriority::Idle);
+ }
+
+ return NS_DispatchToThreadQueue(event.forget(), mMainThread,
+ EventQueuePriority::Idle);
+}
+
+NS_IMETHODIMP
+nsThreadManager::DispatchDirectTaskToCurrentThread(nsIRunnable* aEvent) {
+ NS_ENSURE_STATE(aEvent);
+ nsCOMPtr<nsIRunnable> runnable = aEvent;
+ return GetCurrentThread()->DispatchDirectTask(runnable.forget());
+}
diff --git a/xpcom/threads/nsThreadManager.h b/xpcom/threads/nsThreadManager.h
new file mode 100644
index 0000000000..444cdb23d2
--- /dev/null
+++ b/xpcom/threads/nsThreadManager.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadManager_h__
+#define nsThreadManager_h__
+
+#include "nsIThreadManager.h"
+#include "nsThread.h"
+#include "mozilla/ShutdownPhase.h"
+
+class nsIRunnable;
+class nsIEventTarget;
+class nsISerialEventTarget;
+class nsIThread;
+
+namespace mozilla {
+class IdleTaskManager;
+class SynchronizedEventQueue;
+} // namespace mozilla
+
+class BackgroundEventTarget;
+
+class nsThreadManager : public nsIThreadManager {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSITHREADMANAGER
+
+ static nsThreadManager& get();
+
+ nsresult Init();
+
+ // Shutdown all threads other than the main thread. This function should only
+ // be called on the main thread of the application process.
+ void ShutdownNonMainThreads();
+
+ // Finish shutting down all threads. This function must be called after
+ // ShutdownNonMainThreads and will delete the BackgroundEventTarget and
+ // take the main thread event target out of commission, but without
+ // releasing the underlying nsThread object.
+ void ShutdownMainThread();
+
+ // Release the underlying main thread nsThread object.
+ void ReleaseMainThread();
+
+ // Called by nsThread to inform the ThreadManager it exists. This method
+ // must be called when the given thread is the current thread.
+ void RegisterCurrentThread(nsThread& aThread);
+
+ // Called by nsThread to inform the ThreadManager it is going away. This
+ // method must be called when the given thread is the current thread.
+ void UnregisterCurrentThread(nsThread& aThread);
+
+ // Returns the current thread. Returns null if OOM or if ThreadManager isn't
+ // initialized. Creates the nsThread if one does not exist yet.
+ nsThread* GetCurrentThread();
+
+ // Returns true iff the currently running thread has an nsThread associated
+  // with it (i.e., whether this is a thread that we can dispatch runnables to).
+ bool IsNSThread() const;
+
+ // CreateCurrentThread sets up an nsThread for the current thread. It uses the
+ // event queue and main thread flags passed in. It should only be called once
+ // for the current thread. After it returns, GetCurrentThread() will return
+ // the thread that was created. GetCurrentThread() will also create a thread
+ // (lazily), but it doesn't allow the queue or main-thread attributes to be
+ // specified.
+ nsThread* CreateCurrentThread(mozilla::SynchronizedEventQueue* aQueue,
+ nsThread::MainThreadFlag aMainThread);
+
+ nsresult DispatchToBackgroundThread(nsIRunnable* aEvent,
+ uint32_t aDispatchFlags);
+
+ already_AddRefed<nsISerialEventTarget> CreateBackgroundTaskQueue(
+ const char* aName);
+
+ ~nsThreadManager();
+
+ void EnableMainThreadEventPrioritization();
+ void FlushInputEventPrioritization();
+ void SuspendInputEventPrioritization();
+ void ResumeInputEventPrioritization();
+
+ static bool MainThreadHasPendingHighPriorityEvents();
+
+ nsIThread* GetMainThreadWeak() { return mMainThread; }
+
+ private:
+ nsThreadManager();
+
+ nsresult SpinEventLoopUntilInternal(
+ const nsACString& aVeryGoodReasonToDoThis,
+ nsINestedEventLoopCondition* aCondition,
+ mozilla::ShutdownPhase aShutdownPhaseToCheck);
+
+ static void ReleaseThread(void* aData);
+
+ unsigned mCurThreadIndex; // thread-local-storage index
+ RefPtr<mozilla::IdleTaskManager> mIdleTaskManager;
+ RefPtr<nsThread> mMainThread;
+ PRThread* mMainPRThread;
+ mozilla::Atomic<bool, mozilla::SequentiallyConsistent> mInitialized;
+
+ // Shared event target used for background runnables.
+ RefPtr<BackgroundEventTarget> mBackgroundEventTarget;
+};
+
+#define NS_THREADMANAGER_CID \
+ { /* 7a4204c6-e45a-4c37-8ebb-6709a22c917c */ \
+ 0x7a4204c6, 0xe45a, 0x4c37, { \
+ 0x8e, 0xbb, 0x67, 0x09, 0xa2, 0x2c, 0x91, 0x7c \
+ } \
+ }
+
+#endif // nsThreadManager_h__
diff --git a/xpcom/threads/nsThreadPool.cpp b/xpcom/threads/nsThreadPool.cpp
new file mode 100644
index 0000000000..362e18f5a7
--- /dev/null
+++ b/xpcom/threads/nsThreadPool.cpp
@@ -0,0 +1,611 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThreadPool.h"
+
+#include "nsCOMArray.h"
+#include "ThreadDelay.h"
+#include "nsThreadManager.h"
+#include "nsThread.h"
+#include "nsThreadUtils.h"
+#include "prinrval.h"
+#include "mozilla/Logging.h"
+#include "mozilla/ProfilerLabels.h"
+#include "mozilla/ProfilerRunnable.h"
+#include "mozilla/SchedulerGroup.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/SpinEventLoopUntil.h"
+#include "nsThreadSyncDispatch.h"
+
+#include <mutex>
+
+using namespace mozilla;
+
+static LazyLogModule sThreadPoolLog("nsThreadPool");
+#ifdef LOG
+# undef LOG
+#endif
+#define LOG(args) MOZ_LOG(sThreadPoolLog, mozilla::LogLevel::Debug, args)
+
+static MOZ_THREAD_LOCAL(nsThreadPool*) gCurrentThreadPool;
+
+void nsThreadPool::InitTLS() { gCurrentThreadPool.infallibleInit(); }
+
+// DESIGN:
+// o Allocate anonymous threads.
+// o Use nsThreadPool::Run as the main routine for each thread.
+// o Each thread waits on the event queue's monitor, checking for
+// pending events and rescheduling itself as an idle thread.
+
+#define DEFAULT_THREAD_LIMIT 4
+#define DEFAULT_IDLE_THREAD_LIMIT 1
+#define DEFAULT_IDLE_THREAD_TIMEOUT PR_SecondsToInterval(60)
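+
+// Illustrative sketch (not part of this patch): typical pool setup and
+// teardown, assuming `task` is an nsCOMPtr<nsIRunnable>:
+//
+//   RefPtr<nsThreadPool> pool = new nsThreadPool();
+//   pool->SetName("Example"_ns);      // only allowed before threads spawn
+//   pool->SetThreadLimit(8);          // overrides DEFAULT_THREAD_LIMIT (4)
+//   pool->Dispatch(task.forget(), NS_DISPATCH_NORMAL);
+//   pool->Shutdown();                 // spins this thread's event loop
+//                                     // until every pool thread has exited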
+
+NS_IMPL_ISUPPORTS_INHERITED(nsThreadPool, Runnable, nsIThreadPool,
+ nsIEventTarget)
+
+nsThreadPool* nsThreadPool::GetCurrentThreadPool() {
+ return gCurrentThreadPool.get();
+}
+
+nsThreadPool::nsThreadPool()
+ : Runnable("nsThreadPool"),
+ mMutex("[nsThreadPool.mMutex]"),
+ mEventsAvailable(mMutex, "[nsThreadPool.mEventsAvailable]"),
+ mThreadLimit(DEFAULT_THREAD_LIMIT),
+ mIdleThreadLimit(DEFAULT_IDLE_THREAD_LIMIT),
+ mIdleThreadTimeout(DEFAULT_IDLE_THREAD_TIMEOUT),
+ mIdleCount(0),
+ mQoSPriority(nsIThread::QOS_PRIORITY_NORMAL),
+ mStackSize(nsIThreadManager::DEFAULT_STACK_SIZE),
+ mShutdown(false),
+ mRegressiveMaxIdleTime(false),
+ mIsAPoolThreadFree(true) {
+ LOG(("THRD-P(%p) constructor!!!\n", this));
+}
+
+nsThreadPool::~nsThreadPool() {
+ // Threads keep a reference to the nsThreadPool until they return from Run()
+ // after removing themselves from mThreads.
+ MOZ_ASSERT(mThreads.IsEmpty());
+}
+
+nsresult nsThreadPool::PutEvent(nsIRunnable* aEvent) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return PutEvent(event.forget(), 0);
+}
+
+nsresult nsThreadPool::PutEvent(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags) {
+ // Avoid spawning a new thread while holding the event queue lock...
+
+ bool spawnThread = false;
+ uint32_t stackSize = 0;
+ nsCString name;
+ {
+ MutexAutoLock lock(mMutex);
+
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ LOG(("THRD-P(%p) put [%d %d %d]\n", this, mIdleCount, mThreads.Count(),
+ mThreadLimit));
+ MOZ_ASSERT(mIdleCount <= (uint32_t)mThreads.Count(), "oops");
+
+ // Make sure we have a thread to service this event.
+ if (mThreads.Count() < (int32_t)mThreadLimit &&
+ !(aFlags & NS_DISPATCH_AT_END) &&
+ // Spawn a new thread if we don't have enough idle threads to serve
+ // pending events immediately.
+ mEvents.Count(lock) >= mIdleCount) {
+ spawnThread = true;
+ }
+
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ LogRunnable::LogDispatch(event);
+ mEvents.PutEvent(event.forget(), EventQueuePriority::Normal, lock);
+ mEventsAvailable.Notify();
+ stackSize = mStackSize;
+ name = mName;
+ }
+
+ auto delay = MakeScopeExit([&]() {
+ // Delay to encourage the receiving task to run before we do work.
+ DelayForChaosMode(ChaosFeature::TaskDispatching, 1000);
+ });
+
+ LOG(("THRD-P(%p) put [spawn=%d]\n", this, spawnThread));
+ if (!spawnThread) {
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIThread> thread;
+ nsresult rv = NS_NewNamedThread(
+ mThreadNaming.GetNextThreadName(name), getter_AddRefs(thread), nullptr,
+ {.stackSize = stackSize, .blockDispatch = true});
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ bool killThread = false;
+ {
+ MutexAutoLock lock(mMutex);
+ if (mShutdown) {
+ killThread = true;
+ } else if (mThreads.Count() < (int32_t)mThreadLimit) {
+ mThreads.AppendObject(thread);
+ if (mThreads.Count() >= (int32_t)mThreadLimit) {
+ mIsAPoolThreadFree = false;
+ }
+ } else {
+ // Someone else may have also been starting a thread
+ killThread = true; // okay, we don't need this thread anymore
+ }
+ }
+ LOG(("THRD-P(%p) put [%p kill=%d]\n", this, thread.get(), killThread));
+ if (killThread) {
+ // We never dispatched any events to the thread, so we can shut it down
+ // asynchronously without worrying about anything.
+ ShutdownThread(thread);
+ } else {
+ thread->Dispatch(this, NS_DISPATCH_IGNORE_BLOCK_DISPATCH);
+ }
+
+ return NS_OK;
+}
+
+void nsThreadPool::ShutdownThread(nsIThread* aThread) {
+ LOG(("THRD-P(%p) shutdown async [%p]\n", this, aThread));
+
+ // This is either called by a threadpool thread that is out of work, or
+ // a thread that attempted to create a threadpool thread and raced in
+ // such a way that the newly created thread is no longer necessary.
+ // In the first case, we must go to another thread to shut aThread down
+ // (because it is the current thread). In the second case, we cannot
+ // synchronously shut down the current thread (because then Dispatch() would
+ // spin the event loop, and that could blow up the world), and asynchronous
+ // shutdown requires this thread have an event loop (and it may not, see bug
+ // 10204784). The simplest way to cover all cases is to asynchronously
+ // shutdown aThread from the main thread.
+ SchedulerGroup::Dispatch(
+ TaskCategory::Other,
+ NewRunnableMethod("nsIThread::AsyncShutdown", aThread,
+ &nsIThread::AsyncShutdown));
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetQoSForThreads(nsIThread::QoSPriority aPriority) {
+ MutexAutoLock lock(mMutex);
+ mQoSPriority = aPriority;
+
+ // We don't notify threads here to observe the change, because we don't want
+ // to create spurious wakeups during idle. Rather, we want threads to simply
+ // observe the change on their own if they wake up to do some task.
+
+ return NS_OK;
+}
+
+// This event 'runs' for the lifetime of the worker thread. The actual
+// eventqueue is mEvents, and is shared by all the worker threads. This
+// means that the set of threads together define the delay seen by a new
+// event sent to the pool.
+//
+// To model the delay experienced by the pool, we can have each thread in
+// the pool report 0 if it's idle OR if the pool is below the thread limit;
+// or otherwise the current event's queuing delay plus current running
+// time.
+//
+// To reconstruct the delays for the pool, the profiler can look at all the
+// threads that are part of a pool (pools have defined naming patterns that
+// can be used to connect them). If all threads have delays at time X,
+// that means that all threads saturated at that point and any event
+// dispatched to the pool would get a delay.
+//
+// The delay experienced by an event dispatched when all pool threads are
+// busy is based on the calculations shown in platform.cpp. Run that
+// algorithm for each thread in the pool, and the delay at time X is the
+// longest value for time X of any of the threads, OR the time from X until
+// any one of the threads reports 0 (i.e. it's not busy), whichever is
+// shorter.
+
+// In order to record this when the profiler samples threads in the pool,
+// each thread must (effectively) override GetRunningEventDelay, by
+// resetting the mLastEventDelay/Start values in the nsThread when we start
+// to run an event (or when we run out of events to run). Note that handling
+// the shutdown of a thread may be a little tricky.
+
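+// Illustrative sketch (not part of this patch) of the reconstruction above,
+// for a pool with threads T1..Tn sampled at time X:
+//
+//   poolDelay(X) = 0, if any Ti reports 0 at X (a thread is free);
+//   poolDelay(X) = min(max_i delay_i(X),
+//                      time from X until some Ti first reports 0), otherwise.
+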
+NS_IMETHODIMP
+nsThreadPool::Run() {
+ nsCOMPtr<nsIThread> current;
+ nsThreadManager::get().GetCurrentThread(getter_AddRefs(current));
+
+ bool shutdownThreadOnExit = false;
+ bool exitThread = false;
+ bool wasIdle = false;
+ TimeStamp idleSince;
+ nsIThread::QoSPriority threadPriority = nsIThread::QOS_PRIORITY_NORMAL;
+
+ // This thread is an nsThread created below with NS_NewNamedThread()
+ static_cast<nsThread*>(current.get())
+ ->SetPoolThreadFreePtr(&mIsAPoolThreadFree);
+
+ nsCOMPtr<nsIThreadPoolListener> listener;
+ {
+ MutexAutoLock lock(mMutex);
+ listener = mListener;
+ LOG(("THRD-P(%p) enter %s\n", this, mName.BeginReading()));
+
+ // Go ahead and check for thread priority. If priority is normal, do nothing
+ // because threads are created with default priority.
+ if (threadPriority != mQoSPriority) {
+ current->SetThreadQoS(threadPriority);
+ threadPriority = mQoSPriority;
+ }
+ }
+
+ if (listener) {
+ listener->OnThreadCreated();
+ }
+
+ MOZ_ASSERT(!gCurrentThreadPool.get());
+ gCurrentThreadPool.set(this);
+
+ do {
+ nsCOMPtr<nsIRunnable> event;
+ TimeDuration delay;
+ {
+ MutexAutoLock lock(mMutex);
+
+ // Before getting the next event, we can adjust priority as needed.
+ if (threadPriority != mQoSPriority) {
+ current->SetThreadQoS(threadPriority);
+ threadPriority = mQoSPriority;
+ }
+
+ event = mEvents.GetEvent(lock, &delay);
+ if (!event) {
+ TimeStamp now = TimeStamp::Now();
+ uint32_t idleTimeoutDivider =
+ (mIdleCount && mRegressiveMaxIdleTime) ? mIdleCount : 1;
+ TimeDuration timeout = TimeDuration::FromMilliseconds(
+ static_cast<double>(mIdleThreadTimeout) / idleTimeoutDivider);
+
+ // If we are shutting down, then don't keep any idle threads.
+ if (mShutdown) {
+ exitThread = true;
+ } else {
+ if (wasIdle) {
+ // if too many idle threads or idle for too long, then bail.
+ if (mIdleCount > mIdleThreadLimit ||
+ (mIdleThreadTimeout != UINT32_MAX &&
+ (now - idleSince) >= timeout)) {
+ exitThread = true;
+ }
+ } else {
+ // if would be too many idle threads...
+ if (mIdleCount == mIdleThreadLimit) {
+ exitThread = true;
+ } else {
+ ++mIdleCount;
+ idleSince = now;
+ wasIdle = true;
+ }
+ }
+ }
+
+ if (exitThread) {
+ if (wasIdle) {
+ --mIdleCount;
+ }
+ shutdownThreadOnExit = mThreads.RemoveObject(current);
+
+        // Keep track of whether there are threads available to start.
+ mIsAPoolThreadFree = (mThreads.Count() < (int32_t)mThreadLimit);
+ } else {
+ current->SetRunningEventDelay(TimeDuration(), TimeStamp());
+
+ AUTO_PROFILER_LABEL("nsThreadPool::Run::Wait", IDLE);
+
+ TimeDuration delta = timeout - (now - idleSince);
+ LOG(("THRD-P(%p) %s waiting [%f]\n", this, mName.BeginReading(),
+ delta.ToMilliseconds()));
+ mEventsAvailable.Wait(delta);
+ LOG(("THRD-P(%p) done waiting\n", this));
+ }
+ } else if (wasIdle) {
+ wasIdle = false;
+ --mIdleCount;
+ }
+ }
+ if (event) {
+ if (MOZ_LOG_TEST(sThreadPoolLog, mozilla::LogLevel::Debug)) {
+ MutexAutoLock lock(mMutex);
+ LOG(("THRD-P(%p) %s running [%p]\n", this, mName.BeginReading(),
+ event.get()));
+ }
+
+ // Delay event processing to encourage whoever dispatched this event
+ // to run.
+ DelayForChaosMode(ChaosFeature::TaskRunning, 1000);
+
+ if (profiler_thread_is_being_profiled(
+ ThreadProfilingFeatures::Sampling)) {
+ // We'll handle the case of unstarted threads available
+ // when we sample.
+ current->SetRunningEventDelay(delay, TimeStamp::Now());
+ }
+
+ LogRunnable::Run log(event);
+ AUTO_PROFILE_FOLLOWING_RUNNABLE(event);
+ event->Run();
+ // To cover the event's destructor code in the LogRunnable span
+ event = nullptr;
+ }
+ } while (!exitThread);
+
+ if (listener) {
+ listener->OnThreadShuttingDown();
+ }
+
+ MOZ_ASSERT(gCurrentThreadPool.get() == this);
+ gCurrentThreadPool.set(nullptr);
+
+ if (shutdownThreadOnExit) {
+ ShutdownThread(current);
+ }
+
+ LOG(("THRD-P(%p) leave\n", this));
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+nsThreadPool::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) {
+ LOG(("THRD-P(%p) dispatch [%p %x]\n", this, /* XXX aEvent*/ nullptr, aFlags));
+
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ NS_ASSERTION(aFlags == NS_DISPATCH_NORMAL || aFlags == NS_DISPATCH_AT_END,
+ "unexpected dispatch flags");
+ PutEvent(std::move(aEvent), aFlags);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsThreadPool::RegisterShutdownTask(nsITargetShutdownTask*) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsThreadPool::UnregisterShutdownTask(nsITargetShutdownTask*) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP_(bool)
+nsThreadPool::IsOnCurrentThreadInfallible() {
+ return gCurrentThreadPool.get() == this;
+}
+
+NS_IMETHODIMP
+nsThreadPool::IsOnCurrentThread(bool* aResult) {
+ MutexAutoLock lock(mMutex);
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ *aResult = IsOnCurrentThreadInfallible();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::Shutdown() { return ShutdownWithTimeout(-1); }
+
+NS_IMETHODIMP
+nsThreadPool::ShutdownWithTimeout(int32_t aTimeoutMs) {
+ nsCOMArray<nsIThread> threads;
+ nsCOMPtr<nsIThreadPoolListener> listener;
+ {
+ MutexAutoLock lock(mMutex);
+ if (mShutdown) {
+ return NS_ERROR_ILLEGAL_DURING_SHUTDOWN;
+ }
+ mShutdown = true;
+ mEventsAvailable.NotifyAll();
+
+ threads.AppendObjects(mThreads);
+ mThreads.Clear();
+
+ // Swap in a null listener so that we release the listener at the end of
+    // this method. The listener will be kept alive by the threads that were
+    // created while it was set.
+ mListener.swap(listener);
+ }
+
+ nsTArray<nsCOMPtr<nsIThreadShutdown>> contexts;
+ for (int32_t i = 0; i < threads.Count(); ++i) {
+ nsCOMPtr<nsIThreadShutdown> context;
+ if (NS_SUCCEEDED(threads[i]->BeginShutdown(getter_AddRefs(context)))) {
+ contexts.AppendElement(std::move(context));
+ }
+ }
+
+  // Start a timer which, when it expires, stops waiting and leaks the
+  // threads, forcing onCompletion to be called.
+ nsCOMPtr<nsITimer> timer;
+ if (aTimeoutMs >= 0) {
+ NS_NewTimerWithCallback(
+ getter_AddRefs(timer),
+ [&](nsITimer*) {
+ for (auto& context : contexts) {
+ context->StopWaitingAndLeakThread();
+ }
+ },
+ aTimeoutMs, nsITimer::TYPE_ONE_SHOT,
+ "nsThreadPool::ShutdownWithTimeout");
+ }
+
+ // Start a counter and register a callback to decrement outstandingThreads
+ // when the threads finish exiting. We'll spin an event loop until
+ // outstandingThreads reaches 0.
+ uint32_t outstandingThreads = contexts.Length();
+ RefPtr onCompletion = NS_NewCancelableRunnableFunction(
+ "nsThreadPool thread completion", [&] { --outstandingThreads; });
+ for (auto& context : contexts) {
+ context->OnCompletion(onCompletion);
+ }
+
+ mozilla::SpinEventLoopUntil("nsThreadPool::ShutdownWithTimeout"_ns,
+ [&] { return outstandingThreads == 0; });
+
+ if (timer) {
+ timer->Cancel();
+ }
+ onCompletion->Cancel();
+
+ return NS_OK;
+}
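+
+// Illustrative sketch (not part of this patch): a caller that prefers to leak
+// stuck threads rather than hang shutdown indefinitely:
+//
+//   pool->ShutdownWithTimeout(3000);  // wait up to ~3s, then leak and return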
+
+NS_IMETHODIMP
+nsThreadPool::GetThreadLimit(uint32_t* aValue) {
+ MutexAutoLock lock(mMutex);
+ *aValue = mThreadLimit;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetThreadLimit(uint32_t aValue) {
+ MutexAutoLock lock(mMutex);
+ LOG(("THRD-P(%p) thread limit [%u]\n", this, aValue));
+ mThreadLimit = aValue;
+ if (mIdleThreadLimit > mThreadLimit) {
+ mIdleThreadLimit = mThreadLimit;
+ }
+
+ if (static_cast<uint32_t>(mThreads.Count()) > mThreadLimit) {
+ mEventsAvailable
+ .NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetIdleThreadLimit(uint32_t* aValue) {
+ MutexAutoLock lock(mMutex);
+ *aValue = mIdleThreadLimit;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetIdleThreadLimit(uint32_t aValue) {
+ MutexAutoLock lock(mMutex);
+ LOG(("THRD-P(%p) idle thread limit [%u]\n", this, aValue));
+ mIdleThreadLimit = aValue;
+ if (mIdleThreadLimit > mThreadLimit) {
+ mIdleThreadLimit = mThreadLimit;
+ }
+
+ // Do we need to kill some idle threads?
+ if (mIdleCount > mIdleThreadLimit) {
+ mEventsAvailable
+ .NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetIdleThreadTimeout(uint32_t* aValue) {
+ MutexAutoLock lock(mMutex);
+ *aValue = mIdleThreadTimeout;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetIdleThreadTimeout(uint32_t aValue) {
+ MutexAutoLock lock(mMutex);
+ uint32_t oldTimeout = mIdleThreadTimeout;
+ mIdleThreadTimeout = aValue;
+
+ // Do we need to notify any idle threads that their sleep time has shortened?
+ if (mIdleThreadTimeout < oldTimeout && mIdleCount > 0) {
+ mEventsAvailable
+ .NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetIdleThreadTimeoutRegressive(bool* aValue) {
+ MutexAutoLock lock(mMutex);
+ *aValue = mRegressiveMaxIdleTime;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetIdleThreadTimeoutRegressive(bool aValue) {
+ MutexAutoLock lock(mMutex);
+ bool oldRegressive = mRegressiveMaxIdleTime;
+ mRegressiveMaxIdleTime = aValue;
+
+  // Would setting a regressive timeout affect idle threads?
+ if (mRegressiveMaxIdleTime > oldRegressive && mIdleCount > 1) {
+ mEventsAvailable
+ .NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetThreadStackSize(uint32_t* aValue) {
+ MutexAutoLock lock(mMutex);
+ *aValue = mStackSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetThreadStackSize(uint32_t aValue) {
+ MutexAutoLock lock(mMutex);
+ mStackSize = aValue;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetListener(nsIThreadPoolListener** aListener) {
+ MutexAutoLock lock(mMutex);
+ NS_IF_ADDREF(*aListener = mListener);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetListener(nsIThreadPoolListener* aListener) {
+ nsCOMPtr<nsIThreadPoolListener> swappedListener(aListener);
+ {
+ MutexAutoLock lock(mMutex);
+ mListener.swap(swappedListener);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetName(const nsACString& aName) {
+ MutexAutoLock lock(mMutex);
+ if (mThreads.Count()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ mName = aName;
+ return NS_OK;
+}
diff --git a/xpcom/threads/nsThreadPool.h b/xpcom/threads/nsThreadPool.h
new file mode 100644
index 0000000000..b10a2f5265
--- /dev/null
+++ b/xpcom/threads/nsThreadPool.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadPool_h__
+#define nsThreadPool_h__
+
+#include "nsIThreadPool.h"
+#include "nsIRunnable.h"
+#include "nsCOMArray.h"
+#include "nsCOMPtr.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/CondVar.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/Mutex.h"
+
+class nsIThread;
+
+class nsThreadPool final : public mozilla::Runnable, public nsIThreadPool {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIEVENTTARGET_FULL
+ NS_DECL_NSITHREADPOOL
+ NS_DECL_NSIRUNNABLE
+
+ nsThreadPool();
+
+ static void InitTLS();
+ static nsThreadPool* GetCurrentThreadPool();
+
+ private:
+ ~nsThreadPool();
+
+ void ShutdownThread(nsIThread* aThread);
+ nsresult PutEvent(nsIRunnable* aEvent);
+ nsresult PutEvent(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags);
+
+ mozilla::Mutex mMutex;
+ nsCOMArray<nsIThread> mThreads MOZ_GUARDED_BY(mMutex);
+ mozilla::CondVar mEventsAvailable MOZ_GUARDED_BY(mMutex);
+ mozilla::EventQueue mEvents MOZ_GUARDED_BY(mMutex);
+ uint32_t mThreadLimit MOZ_GUARDED_BY(mMutex);
+ uint32_t mIdleThreadLimit MOZ_GUARDED_BY(mMutex);
+ uint32_t mIdleThreadTimeout MOZ_GUARDED_BY(mMutex);
+ uint32_t mIdleCount MOZ_GUARDED_BY(mMutex);
+ nsIThread::QoSPriority mQoSPriority MOZ_GUARDED_BY(mMutex);
+ uint32_t mStackSize MOZ_GUARDED_BY(mMutex);
+ nsCOMPtr<nsIThreadPoolListener> mListener MOZ_GUARDED_BY(mMutex);
+ mozilla::Atomic<bool, mozilla::Relaxed> mShutdown;
+ bool mRegressiveMaxIdleTime MOZ_GUARDED_BY(mMutex);
+ mozilla::Atomic<bool, mozilla::Relaxed> mIsAPoolThreadFree;
+ // set once before we start threads
+ nsCString mName MOZ_GUARDED_BY(mMutex);
+ nsThreadPoolNaming mThreadNaming; // all data inside this is atomic
+};
+
+#define NS_THREADPOOL_CID \
+ { /* 547ec2a8-315e-4ec4-888e-6e4264fe90eb */ \
+ 0x547ec2a8, 0x315e, 0x4ec4, { \
+ 0x88, 0x8e, 0x6e, 0x42, 0x64, 0xfe, 0x90, 0xeb \
+ } \
+ }
+
+#endif // nsThreadPool_h__
diff --git a/xpcom/threads/nsThreadSyncDispatch.h b/xpcom/threads/nsThreadSyncDispatch.h
new file mode 100644
index 0000000000..1673453f9d
--- /dev/null
+++ b/xpcom/threads/nsThreadSyncDispatch.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadSyncDispatch_h_
+#define nsThreadSyncDispatch_h_
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SpinEventLoopUntil.h"
+
+#include "nsThreadUtils.h"
+#include "LeakRefPtr.h"
+
+class nsThreadSyncDispatch : public mozilla::Runnable {
+ public:
+ nsThreadSyncDispatch(already_AddRefed<nsIEventTarget> aOrigin,
+ already_AddRefed<nsIRunnable>&& aTask)
+ : Runnable("nsThreadSyncDispatch"),
+ mOrigin(aOrigin),
+ mSyncTask(std::move(aTask)),
+ mIsPending(true) {}
+
+ bool IsPending() {
+ // This is an atomic acquire on the origin thread.
+ return mIsPending;
+ }
+
+ void SpinEventLoopUntilComplete(const nsACString& aVeryGoodReasonToDoThis) {
+ mozilla::SpinEventLoopUntil(aVeryGoodReasonToDoThis,
+ [&]() -> bool { return !IsPending(); });
+ }
+
+ private:
+ NS_IMETHOD Run() override {
+ if (nsCOMPtr<nsIRunnable> task = mSyncTask.take()) {
+ MOZ_ASSERT(!mSyncTask);
+
+ mozilla::DebugOnly<nsresult> result = task->Run();
+ MOZ_ASSERT(NS_SUCCEEDED(result), "task in sync dispatch should not fail");
+
+ // We must release the task here to ensure that when the original
+ // thread is unblocked, this task has been released.
+ task = nullptr;
+
+ // This is an atomic release on the target thread.
+ mIsPending = false;
+
+ // unblock the origin thread
+ mOrigin->Dispatch(this, NS_DISPATCH_IGNORE_BLOCK_DISPATCH);
+ }
+
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIEventTarget> mOrigin;
+ // The task is leaked by default when Run() is not called, because
+  // otherwise we may release it on the wrong thread.
+ mozilla::LeakRefPtr<nsIRunnable> mSyncTask;
+ mozilla::Atomic<bool, mozilla::ReleaseAcquire> mIsPending;
+};
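+
+// Illustrative sketch (not part of this patch): the ReleaseAcquire ordering on
+// mIsPending pairs the store in Run() (target thread) with the load in
+// IsPending() (origin thread), so once the origin observes !IsPending() it
+// also observes every write the task made. This mirrors what
+// NS_DispatchAndSpinEventLoopUntilComplete() in nsThreadUtils.cpp does:
+//
+//   RefPtr<nsThreadSyncDispatch> sync = new nsThreadSyncDispatch(
+//       do_AddRef(NS_GetCurrentThread()), task.forget());
+//   target->Dispatch(do_AddRef(sync));
+//   sync->SpinEventLoopUntilComplete("example:sync-dispatch"_ns);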
+
+#endif // nsThreadSyncDispatch_h_
diff --git a/xpcom/threads/nsThreadUtils.cpp b/xpcom/threads/nsThreadUtils.cpp
new file mode 100644
index 0000000000..6eeb3999ac
--- /dev/null
+++ b/xpcom/threads/nsThreadUtils.cpp
@@ -0,0 +1,768 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThreadUtils.h"
+
+#include "chrome/common/ipc_message.h" // for IPC::Message
+#include "LeakRefPtr.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Likely.h"
+#include "mozilla/TimeStamp.h"
+#include "nsComponentManagerUtils.h"
+#include "nsExceptionHandler.h"
+#include "nsIEventTarget.h"
+#include "nsITimer.h"
+#include "nsString.h"
+#include "nsThreadSyncDispatch.h"
+#include "nsTimerImpl.h"
+#include "prsystem.h"
+
+#include "nsThreadManager.h"
+#include "nsThreadPool.h"
+#include "TaskController.h"
+
+#ifdef XP_WIN
+# include <windows.h>
+#elif defined(XP_MACOSX)
+# include <sys/resource.h>
+#endif
+
+#if defined(ANDROID)
+# include <sys/prctl.h>
+#endif
+
+static mozilla::LazyLogModule sEventDispatchAndRunLog("events");
+#ifdef LOG1
+# undef LOG1
+#endif
+#define LOG1(args) \
+ MOZ_LOG(sEventDispatchAndRunLog, mozilla::LogLevel::Error, args)
+#define LOG1_ENABLED() \
+ MOZ_LOG_TEST(sEventDispatchAndRunLog, mozilla::LogLevel::Error)
+
+using namespace mozilla;
+
+#ifndef XPCOM_GLUE_AVOID_NSPR
+
+NS_IMPL_ISUPPORTS(IdlePeriod, nsIIdlePeriod)
+
+NS_IMETHODIMP
+IdlePeriod::GetIdlePeriodHint(TimeStamp* aIdleDeadline) {
+ *aIdleDeadline = TimeStamp();
+ return NS_OK;
+}
+
+// NS_IMPL_NAMED_* relies on the mName field, which is not present on
+// release or beta. Instead, fall back to using "Runnable" for all
+// runnables.
+# ifndef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+NS_IMPL_ISUPPORTS(Runnable, nsIRunnable)
+# else
+NS_IMPL_NAMED_ADDREF(Runnable, mName)
+NS_IMPL_NAMED_RELEASE(Runnable, mName)
+NS_IMPL_QUERY_INTERFACE(Runnable, nsIRunnable, nsINamed)
+# endif
+
+NS_IMETHODIMP
+Runnable::Run() {
+ // Do nothing
+ return NS_OK;
+}
+
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+NS_IMETHODIMP
+Runnable::GetName(nsACString& aName) {
+ if (mName) {
+ aName.AssignASCII(mName);
+ } else {
+ aName.Truncate();
+ }
+ return NS_OK;
+}
+# endif
+
+NS_IMPL_ISUPPORTS_INHERITED(DiscardableRunnable, Runnable,
+ nsIDiscardableRunnable)
+
+NS_IMPL_ISUPPORTS_INHERITED(CancelableRunnable, DiscardableRunnable,
+ nsICancelableRunnable)
+
+void CancelableRunnable::OnDiscard() {
+ // Tasks that implement Cancel() can be safely cleaned up if it turns out
+ // that the task will not run.
+ (void)NS_WARN_IF(NS_FAILED(Cancel()));
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(IdleRunnable, DiscardableRunnable, nsIIdleRunnable)
+
+NS_IMPL_ISUPPORTS_INHERITED(CancelableIdleRunnable, CancelableRunnable,
+ nsIIdleRunnable)
+
+NS_IMPL_ISUPPORTS_INHERITED(PrioritizableRunnable, Runnable,
+ nsIRunnablePriority)
+
+PrioritizableRunnable::PrioritizableRunnable(
+ already_AddRefed<nsIRunnable>&& aRunnable, uint32_t aPriority)
+    // The real runnable name is managed by overriding the GetName function.
+ : Runnable("PrioritizableRunnable"),
+ mRunnable(std::move(aRunnable)),
+ mPriority(aPriority) {
+# if DEBUG
+ nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(mRunnable);
+ MOZ_ASSERT(!runnablePrio);
+# endif
+}
+
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+NS_IMETHODIMP
+PrioritizableRunnable::GetName(nsACString& aName) {
+ // Try to get a name from the underlying runnable.
+ nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable);
+ if (named) {
+ named->GetName(aName);
+ }
+ return NS_OK;
+}
+# endif
+
+NS_IMETHODIMP
+PrioritizableRunnable::Run() {
+ MOZ_RELEASE_ASSERT(NS_IsMainThread());
+ return mRunnable->Run();
+}
+
+NS_IMETHODIMP
+PrioritizableRunnable::GetPriority(uint32_t* aPriority) {
+ *aPriority = mPriority;
+ return NS_OK;
+}
+
+already_AddRefed<nsIRunnable> mozilla::CreateRenderBlockingRunnable(
+ already_AddRefed<nsIRunnable>&& aRunnable) {
+ nsCOMPtr<nsIRunnable> runnable = new PrioritizableRunnable(
+ std::move(aRunnable), nsIRunnablePriority::PRIORITY_RENDER_BLOCKING);
+ return runnable.forget();
+}
+
+NS_IMPL_ISUPPORTS_INHERITED(PrioritizableCancelableRunnable, CancelableRunnable,
+ nsIRunnablePriority)
+
+NS_IMETHODIMP
+PrioritizableCancelableRunnable::GetPriority(uint32_t* aPriority) {
+ *aPriority = mPriority;
+ return NS_OK;
+}
+
+#endif // XPCOM_GLUE_AVOID_NSPR
+
+//-----------------------------------------------------------------------------
+
+nsresult NS_NewNamedThread(const nsACString& aName, nsIThread** aResult,
+ nsIRunnable* aInitialEvent,
+ nsIThreadManager::ThreadCreationOptions aOptions) {
+ nsCOMPtr<nsIRunnable> event = aInitialEvent;
+ return NS_NewNamedThread(aName, aResult, event.forget(), aOptions);
+}
+
+nsresult NS_NewNamedThread(const nsACString& aName, nsIThread** aResult,
+ already_AddRefed<nsIRunnable> aInitialEvent,
+ nsIThreadManager::ThreadCreationOptions aOptions) {
+ nsCOMPtr<nsIRunnable> event = std::move(aInitialEvent);
+ nsCOMPtr<nsIThread> thread;
+ nsresult rv = nsThreadManager::get().nsThreadManager::NewNamedThread(
+ aName, aOptions, getter_AddRefs(thread));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ if (event) {
+ rv = thread->Dispatch(event.forget(), NS_DISPATCH_IGNORE_BLOCK_DISPATCH);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ }
+
+ *aResult = nullptr;
+ thread.swap(*aResult);
+ return NS_OK;
+}
+
+nsresult NS_GetCurrentThread(nsIThread** aResult) {
+ return nsThreadManager::get().nsThreadManager::GetCurrentThread(aResult);
+}
+
+nsresult NS_GetMainThread(nsIThread** aResult) {
+ return nsThreadManager::get().nsThreadManager::GetMainThread(aResult);
+}
+
+nsresult NS_DispatchToCurrentThread(already_AddRefed<nsIRunnable>&& aEvent) {
+ nsresult rv;
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ // XXX: Consider using GetCurrentSerialEventTarget() to support TaskQueues.
+ nsISerialEventTarget* thread = NS_GetCurrentThread();
+ if (!thread) {
+ return NS_ERROR_UNEXPECTED;
+ }
+  // To keep us from leaking the runnable if the dispatch method fails, we
+  // grab a raw pointer up front and release the reference on failure.
+ nsIRunnable* temp = event.get();
+ rv = thread->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+    // Dispatch() leaked the reference to the event, but due to the caller's
+ // assumptions, we shouldn't leak here. And given we are on the same
+ // thread as the dispatch target, it's mostly safe to do it here.
+ NS_RELEASE(temp);
+ }
+ return rv;
+}
+
+// It is common to call NS_DispatchToCurrentThread with a newly
+// allocated runnable with a refcount of zero. To keep us from leaking
+// the runnable if the dispatch method fails, we take a death grip.
+nsresult NS_DispatchToCurrentThread(nsIRunnable* aEvent) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return NS_DispatchToCurrentThread(event.forget());
+}
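+
+// Illustrative sketch (not part of this patch): the death grip above makes the
+// common "dispatch a freshly allocated runnable" pattern safe even when
+// dispatch fails (ExampleRunnable is hypothetical):
+//
+//   nsresult rv = NS_DispatchToCurrentThread(new ExampleRunnable());
+//   // refcount was 0 at the call; the nsCOMPtr grip destroys it on failure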
+
+nsresult NS_DispatchToMainThread(already_AddRefed<nsIRunnable>&& aEvent,
+ uint32_t aDispatchFlags) {
+ LeakRefPtr<nsIRunnable> event(std::move(aEvent));
+ nsCOMPtr<nsIThread> thread;
+ nsresult rv = NS_GetMainThread(getter_AddRefs(thread));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ NS_ASSERTION(false,
+ "Failed NS_DispatchToMainThread() in shutdown; leaking");
+ // NOTE: if you stop leaking here, adjust Promise::MaybeReportRejected(),
+ // which assumes a leak here, or split into leaks and no-leaks versions
+ return rv;
+ }
+ return thread->Dispatch(event.take(), aDispatchFlags);
+}
+
+// In the case of failure with a newly allocated runnable with a
+// refcount of zero, we intentionally leak the runnable, because it is
+// likely that the runnable is being dispatched to the main thread
+// because it owns main thread only objects, so it is not safe to
+// release them here.
+nsresult NS_DispatchToMainThread(nsIRunnable* aEvent, uint32_t aDispatchFlags) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return NS_DispatchToMainThread(event.forget(), aDispatchFlags);
+}
+
+nsresult NS_DelayedDispatchToCurrentThread(
+ already_AddRefed<nsIRunnable>&& aEvent, uint32_t aDelayMs) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+
+ // XXX: Consider using GetCurrentSerialEventTarget() to support TaskQueues.
+ nsISerialEventTarget* thread = NS_GetCurrentThread();
+ if (!thread) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ return thread->DelayedDispatch(event.forget(), aDelayMs);
+}
+
+nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
+ nsIThread* aThread,
+ EventQueuePriority aQueue) {
+ nsresult rv;
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ NS_ENSURE_TRUE(event, NS_ERROR_INVALID_ARG);
+ if (!aThread) {
+ return NS_ERROR_UNEXPECTED;
+ }
+  // To keep us from leaking the runnable if the dispatch method fails, we
+  // grab a raw pointer up front and release the reference on failure.
+ nsIRunnable* temp = event.get();
+ rv = aThread->DispatchToQueue(event.forget(), aQueue);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+    // Dispatch() leaked the reference to the event, but due to the caller's
+ // assumptions, we shouldn't leak here. And given we are on the same
+ // thread as the dispatch target, it's mostly safe to do it here.
+ NS_RELEASE(temp);
+ }
+
+ return rv;
+}
+
+nsresult NS_DispatchToCurrentThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
+ EventQueuePriority aQueue) {
+ return NS_DispatchToThreadQueue(std::move(aEvent), NS_GetCurrentThread(),
+ aQueue);
+}
+
+extern nsresult NS_DispatchToMainThreadQueue(
+ already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aQueue) {
+ nsCOMPtr<nsIThread> mainThread;
+ nsresult rv = NS_GetMainThread(getter_AddRefs(mainThread));
+ if (NS_SUCCEEDED(rv)) {
+ return NS_DispatchToThreadQueue(std::move(aEvent), mainThread, aQueue);
+ }
+ return rv;
+}
+
+class IdleRunnableWrapper final : public Runnable,
+ public nsIDiscardableRunnable,
+ public nsIIdleRunnable {
+ public:
+ explicit IdleRunnableWrapper(already_AddRefed<nsIRunnable>&& aEvent)
+ : Runnable("IdleRunnableWrapper"),
+ mRunnable(std::move(aEvent)),
+ mDiscardable(do_QueryInterface(mRunnable)) {}
+
+ NS_DECL_ISUPPORTS_INHERITED
+
+ NS_IMETHOD Run() override {
+ if (!mRunnable) {
+ return NS_OK;
+ }
+ CancelTimer();
+ // Don't clear mDiscardable because that would cause QueryInterface to
+ // change behavior during the lifetime of an instance.
+ nsCOMPtr<nsIRunnable> runnable = std::move(mRunnable);
+ return runnable->Run();
+ }
+
+ // nsIDiscardableRunnable
+ void OnDiscard() override {
+ if (!mRunnable) {
+ // Run() was already called from TimedOut().
+ return;
+ }
+ mDiscardable->OnDiscard();
+ mRunnable = nullptr;
+ }
+
+ static void TimedOut(nsITimer* aTimer, void* aClosure) {
+ RefPtr<IdleRunnableWrapper> runnable =
+ static_cast<IdleRunnableWrapper*>(aClosure);
+ LogRunnable::Run log(runnable);
+ runnable->Run();
+ runnable = nullptr;
+ }
+
+ void SetTimer(uint32_t aDelay, nsIEventTarget* aTarget) override {
+ MOZ_ASSERT(aTarget);
+ MOZ_ASSERT(!mTimer);
+ NS_NewTimerWithFuncCallback(getter_AddRefs(mTimer), TimedOut, this, aDelay,
+ nsITimer::TYPE_ONE_SHOT,
+ "IdleRunnableWrapper::SetTimer", aTarget);
+ }
+
+#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHOD GetName(nsACString& aName) override {
+ aName.AssignLiteral("IdleRunnableWrapper");
+ if (nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable)) {
+ nsAutoCString name;
+ named->GetName(name);
+ if (!name.IsEmpty()) {
+ aName.AppendLiteral(" for ");
+ aName.Append(name);
+ }
+ }
+ return NS_OK;
+ }
+#endif
+
+ private:
+ ~IdleRunnableWrapper() { CancelTimer(); }
+
+ void CancelTimer() {
+ if (mTimer) {
+ mTimer->Cancel();
+ }
+ }
+
+ nsCOMPtr<nsITimer> mTimer;
+ nsCOMPtr<nsIRunnable> mRunnable;
+ nsCOMPtr<nsIDiscardableRunnable> mDiscardable;
+};
+
+NS_IMPL_ADDREF_INHERITED(IdleRunnableWrapper, Runnable)
+NS_IMPL_RELEASE_INHERITED(IdleRunnableWrapper, Runnable)
+
+NS_INTERFACE_MAP_BEGIN(IdleRunnableWrapper)
+ NS_INTERFACE_MAP_ENTRY(nsIIdleRunnable)
+ NS_INTERFACE_MAP_ENTRY_CONDITIONAL(nsIDiscardableRunnable, mDiscardable)
+NS_INTERFACE_MAP_END_INHERITING(Runnable)
+
+extern nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
+ uint32_t aTimeout, nsIThread* aThread,
+ EventQueuePriority aQueue) {
+ nsCOMPtr<nsIRunnable> event(std::move(aEvent));
+ NS_ENSURE_TRUE(event, NS_ERROR_INVALID_ARG);
+ MOZ_ASSERT(aQueue == EventQueuePriority::Idle ||
+ aQueue == EventQueuePriority::DeferredTimers);
+ if (!aThread) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsCOMPtr<nsIIdleRunnable> idleEvent = do_QueryInterface(event);
+
+ if (!idleEvent) {
+ idleEvent = new IdleRunnableWrapper(event.forget());
+ event = do_QueryInterface(idleEvent);
+ MOZ_DIAGNOSTIC_ASSERT(event);
+ }
+ idleEvent->SetTimer(aTimeout, aThread);
+
+ nsresult rv = NS_DispatchToThreadQueue(event.forget(), aThread, aQueue);
+ if (NS_SUCCEEDED(rv)) {
+ // This is intended to bind with the "DISP" log made from inside
+    // NS_DispatchToThreadQueue for the `event`. There is no possibility of
+    // injecting another "DISP" for a different event on this thread.
+ LOG1(("TIMEOUT %u", aTimeout));
+ }
+
+ return rv;
+}
+
+extern nsresult NS_DispatchToCurrentThreadQueue(
+ already_AddRefed<nsIRunnable>&& aEvent, uint32_t aTimeout,
+ EventQueuePriority aQueue) {
+ return NS_DispatchToThreadQueue(std::move(aEvent), aTimeout,
+ NS_GetCurrentThread(), aQueue);
+}
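+
+// Illustrative sketch (not part of this patch): run `task` during an idle
+// period, but no later than ~2 seconds from now (IdleRunnableWrapper's timer
+// fires TimedOut() if the idle queue never drains):
+//
+//   NS_DispatchToCurrentThreadQueue(task.forget(), 2000,
+//                                   EventQueuePriority::Idle);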
+
+#ifndef XPCOM_GLUE_AVOID_NSPR
+nsresult NS_ProcessPendingEvents(nsIThread* aThread, PRIntervalTime aTimeout) {
+ nsresult rv = NS_OK;
+
+ if (!aThread) {
+ aThread = NS_GetCurrentThread();
+ if (NS_WARN_IF(!aThread)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ }
+
+ PRIntervalTime start = PR_IntervalNow();
+ for (;;) {
+ bool processedEvent;
+ rv = aThread->ProcessNextEvent(false, &processedEvent);
+ if (NS_FAILED(rv) || !processedEvent) {
+ break;
+ }
+ if (PR_IntervalNow() - start > aTimeout) {
+ break;
+ }
+ }
+ return rv;
+}
+#endif // XPCOM_GLUE_AVOID_NSPR
+
+inline bool hasPendingEvents(nsIThread* aThread) {
+ bool val;
+ return NS_SUCCEEDED(aThread->HasPendingEvents(&val)) && val;
+}
+
+bool NS_HasPendingEvents(nsIThread* aThread) {
+ if (!aThread) {
+ aThread = NS_GetCurrentThread();
+ if (NS_WARN_IF(!aThread)) {
+ return false;
+ }
+ }
+ return hasPendingEvents(aThread);
+}
+
+bool NS_ProcessNextEvent(nsIThread* aThread, bool aMayWait) {
+ if (!aThread) {
+ aThread = NS_GetCurrentThread();
+ if (NS_WARN_IF(!aThread)) {
+ return false;
+ }
+ }
+ bool val;
+ return NS_SUCCEEDED(aThread->ProcessNextEvent(aMayWait, &val)) && val;
+}
+
+void NS_SetCurrentThreadName(const char* aName) {
+#if defined(ANDROID)
+  // Workaround for Bug 1541216 - PR_SetCurrentThreadName() fails to set the
+ // thread name on Android.
+ prctl(PR_SET_NAME, reinterpret_cast<unsigned long>(aName));
+#else
+ PR_SetCurrentThreadName(aName);
+#endif
+ if (nsThreadManager::get().IsNSThread()) {
+ nsThread* thread = nsThreadManager::get().GetCurrentThread();
+ thread->SetThreadNameInternal(nsDependentCString(aName));
+ }
+}
+
+nsIThread* NS_GetCurrentThread() {
+ return nsThreadManager::get().GetCurrentThread();
+}
+
+nsIThread* NS_GetCurrentThreadNoCreate() {
+ if (nsThreadManager::get().IsNSThread()) {
+ return NS_GetCurrentThread();
+ }
+ return nullptr;
+}
+
+// nsThreadPoolNaming
+nsCString nsThreadPoolNaming::GetNextThreadName(const nsACString& aPoolName) {
+ nsCString name(aPoolName);
+ name.AppendLiteral(" #");
+ name.AppendInt(++mCounter, 10); // The counter is declared as atomic
+ return name;
+}
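+
+// Illustrative sketch (not part of this patch): successive calls for the pool
+// name "MyPool" yield "MyPool #1", "MyPool #2", and so on; this is the
+// per-pool naming pattern the profiler comment in nsThreadPool.cpp relies on
+// to group a pool's threads together.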
+
+nsresult NS_DispatchBackgroundTask(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aDispatchFlags) {
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return nsThreadManager::get().DispatchToBackgroundThread(event,
+ aDispatchFlags);
+}
+
+// nsAutoLowPriorityIO
+nsAutoLowPriorityIO::nsAutoLowPriorityIO() {
+#if defined(XP_WIN)
+ lowIOPrioritySet =
+ SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_BEGIN);
+#elif defined(XP_MACOSX)
+ oldPriority = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD);
+ lowIOPrioritySet =
+ oldPriority != -1 &&
+ setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, IOPOL_THROTTLE) != -1;
+#else
+ lowIOPrioritySet = false;
+#endif
+}
+
+nsAutoLowPriorityIO::~nsAutoLowPriorityIO() {
+#if defined(XP_WIN)
+ if (MOZ_LIKELY(lowIOPrioritySet)) {
+ // On Windows the old thread priority is automatically restored
+ SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_END);
+ }
+#elif defined(XP_MACOSX)
+ if (MOZ_LIKELY(lowIOPrioritySet)) {
+ setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, oldPriority);
+ }
+#endif
+}
+
+namespace mozilla {
+
+nsISerialEventTarget* GetCurrentSerialEventTarget() {
+ if (nsISerialEventTarget* current =
+ SerialEventTargetGuard::GetCurrentSerialEventTarget()) {
+ return current;
+ }
+
+ MOZ_DIAGNOSTIC_ASSERT(!nsThreadPool::GetCurrentThreadPool(),
+ "Call to GetCurrentSerialEventTarget() from thread "
+ "pool without an active TaskQueue");
+
+ nsCOMPtr<nsIThread> thread;
+ nsresult rv = NS_GetCurrentThread(getter_AddRefs(thread));
+ if (NS_FAILED(rv)) {
+ return nullptr;
+ }
+
+ return thread;
+}
+
+nsISerialEventTarget* GetMainThreadSerialEventTarget() {
+ return static_cast<nsThread*>(nsThreadManager::get().GetMainThreadWeak());
+}
+
+size_t GetNumberOfProcessors() {
+#if defined(XP_LINUX) && defined(MOZ_SANDBOX)
+ static const PRInt32 procs = PR_GetNumberOfProcessors();
+#else
+ PRInt32 procs = PR_GetNumberOfProcessors();
+#endif
+ MOZ_ASSERT(procs > 0);
+ return static_cast<size_t>(procs);
+}
+
+template <typename T>
+void LogTaskBase<T>::LogDispatch(T* aEvent) {
+ LOG1(("DISP %p", aEvent));
+}
+template <typename T>
+void LogTaskBase<T>::LogDispatch(T* aEvent, void* aContext) {
+ LOG1(("DISP %p (%p)", aEvent, aContext));
+}
+
+template <>
+void LogTaskBase<IPC::Message>::LogDispatchWithPid(IPC::Message* aEvent,
+ int32_t aPid) {
+ if (aEvent->seqno() && aPid > 0) {
+ LOG1(("SEND %p %d %d", aEvent, aEvent->seqno(), aPid));
+ }
+}
+
+template <typename T>
+LogTaskBase<T>::Run::Run(T* aEvent, bool aWillRunAgain)
+ : mWillRunAgain(aWillRunAgain) {
+  // Log the address of this RAII object so that we can use it to identify
+  // the DONE log without keeping any reference to the event, which could be
+  // invalid by destructor time.
+ LOG1(("EXEC %p %p", aEvent, this));
+}
+template <typename T>
+LogTaskBase<T>::Run::Run(T* aEvent, void* aContext, bool aWillRunAgain)
+ : mWillRunAgain(aWillRunAgain) {
+ LOG1(("EXEC %p (%p) %p", aEvent, aContext, this));
+}
+
+template <>
+LogTaskBase<nsIRunnable>::Run::Run(nsIRunnable* aEvent, bool aWillRunAgain)
+ : mWillRunAgain(aWillRunAgain) {
+ if (!LOG1_ENABLED()) {
+ return;
+ }
+
+ nsCOMPtr<nsINamed> named(do_QueryInterface(aEvent));
+ if (!named) {
+ LOG1(("EXEC %p %p", aEvent, this));
+ return;
+ }
+
+ nsAutoCString name;
+ named->GetName(name);
+ LOG1(("EXEC %p %p [%s]", aEvent, this, name.BeginReading()));
+}
+
+template <>
+LogTaskBase<Task>::Run::Run(Task* aTask, bool aWillRunAgain)
+ : mWillRunAgain(aWillRunAgain) {
+ if (!LOG1_ENABLED()) {
+ return;
+ }
+
+ nsAutoCString name;
+ if (!aTask->GetName(name)) {
+ LOG1(("EXEC %p %p", aTask, this));
+ return;
+ }
+
+ LOG1(("EXEC %p %p [%s]", aTask, this, name.BeginReading()));
+}
+
+template <>
+LogTaskBase<IPC::Message>::Run::Run(IPC::Message* aMessage, bool aWillRunAgain)
+ : mWillRunAgain(aWillRunAgain) {
+ LOG1(("RECV %p %p %d [%s]", aMessage, this, aMessage->seqno(),
+ aMessage->name()));
+}
+
+template <>
+LogTaskBase<nsTimerImpl>::Run::Run(nsTimerImpl* aEvent, bool aWillRunAgain)
+ : mWillRunAgain(aWillRunAgain) {
+ // The name of the timer will be logged when running it on the target thread.
+ // Logging it here (on the `Timer` thread) would be redundant.
+ LOG1(("EXEC %p %p [nsTimerImpl]", aEvent, this));
+}
+
+template <typename T>
+LogTaskBase<T>::Run::~Run() {
+ LOG1((mWillRunAgain ? "INTERRUPTED %p" : "DONE %p", this));
+}
+
+template class LogTaskBase<nsIRunnable>;
+template class LogTaskBase<MicroTaskRunnable>;
+template class LogTaskBase<IPC::Message>;
+template class LogTaskBase<nsTimerImpl>;
+template class LogTaskBase<Task>;
+template class LogTaskBase<PresShell>;
+template class LogTaskBase<dom::FrameRequestCallback>;
+
+MOZ_THREAD_LOCAL(nsISerialEventTarget*)
+SerialEventTargetGuard::sCurrentThreadTLS;
+void SerialEventTargetGuard::InitTLS() {
+ MOZ_ASSERT(NS_IsMainThread());
+ if (!sCurrentThreadTLS.init()) {
+ MOZ_CRASH();
+ }
+}
+
+} // namespace mozilla
+
+bool nsIEventTarget::IsOnCurrentThread() {
+ if (mThread) {
+ return mThread == PR_GetCurrentThread();
+ }
+ return IsOnCurrentThreadInfallible();
+}
+
+extern "C" {
+// These functions use the C language linkage because they're exposed to Rust
+// via the xpcom/rust/moz_task crate, which wraps them in safe Rust functions
+// that enable Rust code to get/create threads and dispatch runnables on them.
+
+nsresult NS_GetCurrentThreadRust(nsIThread** aResult) {
+ return NS_GetCurrentThread(aResult);
+}
+
+nsresult NS_GetMainThreadRust(nsIThread** aResult) {
+ return NS_GetMainThread(aResult);
+}
+
+// NS_NewNamedThread's aStackSize parameter has the default argument
+// nsIThreadManager::DEFAULT_STACK_SIZE, but we can't omit default arguments
+// when calling a C++ function from Rust, and we can't access
+// nsIThreadManager::DEFAULT_STACK_SIZE in Rust to pass it explicitly,
+// since it is defined in a %{C++ ... %} block within nsIThreadManager.idl.
+// So we indirect through this function.
+nsresult NS_NewNamedThreadWithDefaultStackSize(const nsACString& aName,
+ nsIThread** aResult,
+ nsIRunnable* aEvent) {
+ return NS_NewNamedThread(aName, aResult, aEvent);
+}
+
+bool NS_IsOnCurrentThread(nsIEventTarget* aTarget) {
+ return aTarget->IsOnCurrentThread();
+}
+
+nsresult NS_DispatchBackgroundTask(nsIRunnable* aEvent,
+ uint32_t aDispatchFlags) {
+ return nsThreadManager::get().DispatchToBackgroundThread(aEvent,
+ aDispatchFlags);
+}
+
+nsresult NS_CreateBackgroundTaskQueue(const char* aName,
+ nsISerialEventTarget** aTarget) {
+ nsCOMPtr<nsISerialEventTarget> target =
+ nsThreadManager::get().CreateBackgroundTaskQueue(aName);
+ if (!target) {
+ return NS_ERROR_FAILURE;
+ }
+
+ target.forget(aTarget);
+ return NS_OK;
+}
+
+} // extern "C"
+
+nsresult NS_DispatchAndSpinEventLoopUntilComplete(
+ const nsACString& aVeryGoodReasonToDoThis, nsIEventTarget* aEventTarget,
+ already_AddRefed<nsIRunnable> aEvent) {
+ // NOTE: Get the current thread specifically, as `SpinEventLoopUntil` can
+ // only spin that event target's loop. The reply will specify
+ // NS_DISPATCH_IGNORE_BLOCK_DISPATCH to ensure the reply is received even if
+ // the caller is a threadpool thread.
+ nsCOMPtr<nsIThread> current = NS_GetCurrentThread();
+ if (NS_WARN_IF(!current)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ RefPtr<nsThreadSyncDispatch> wrapper =
+ new nsThreadSyncDispatch(current.forget(), std::move(aEvent));
+ nsresult rv = aEventTarget->Dispatch(do_AddRef(wrapper));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ // FIXME: Consider avoiding leaking the `nsThreadSyncDispatch` as well by
+ // using a fallible version of `Dispatch` once that is added.
+ return rv;
+ }
+
+ wrapper->SpinEventLoopUntilComplete(aVeryGoodReasonToDoThis);
+ return NS_OK;
+}
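+
+// Illustrative sketch (not part of this patch): synchronously run `task` on
+// another event target (`otherTarget` is assumed), spinning this thread's
+// event loop until the task completes:
+//
+//   nsresult rv = NS_DispatchAndSpinEventLoopUntilComplete(
+//       "example:sync-work"_ns, otherTarget, task.forget());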
diff --git a/xpcom/threads/nsThreadUtils.h b/xpcom/threads/nsThreadUtils.h
new file mode 100644
index 0000000000..72041da295
--- /dev/null
+++ b/xpcom/threads/nsThreadUtils.h
@@ -0,0 +1,1925 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadUtils_h__
+#define nsThreadUtils_h__
+
+#include <type_traits>
+#include <tuple>
+#include <utility>
+
+#include "MainThreadUtils.h"
+#include "mozilla/EventQueue.h"
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ThreadLocal.h"
+#include "mozilla/TimeStamp.h"
+
+#include "nsCOMPtr.h"
+#include "nsICancelableRunnable.h"
+#include "nsIDiscardableRunnable.h"
+#include "nsIIdlePeriod.h"
+#include "nsIIdleRunnable.h"
+#include "nsINamed.h"
+#include "nsIRunnable.h"
+#include "nsIThreadManager.h"
+#include "nsITimer.h"
+#include "nsString.h"
+#include "prinrval.h"
+#include "prthread.h"
+
+class MessageLoop;
+class nsIThread;
+
+//-----------------------------------------------------------------------------
+// These methods are alternatives to the methods on nsIThreadManager, provided
+// for convenience.
+
+/**
+ * Create a new thread, and optionally provide an initial event for the thread.
+ *
+ * @param aName
+ * The name of the thread.
+ * @param aResult
+ * The resulting nsIThread object.
+ * @param aInitialEvent
+ * The initial event to run on this thread. This parameter may be null.
+ * @param aOptions
+ * Options used to configure thread creation.
+ * Options are documented in nsIThreadManager.idl.
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * Indicates that the given name is not unique.
+ */
+
+extern nsresult NS_NewNamedThread(
+ const nsACString& aName, nsIThread** aResult,
+ nsIRunnable* aInitialEvent = nullptr,
+ nsIThreadManager::ThreadCreationOptions aOptions = {});
+
+extern nsresult NS_NewNamedThread(
+ const nsACString& aName, nsIThread** aResult,
+ already_AddRefed<nsIRunnable> aInitialEvent,
+ nsIThreadManager::ThreadCreationOptions aOptions = {});
+
+template <size_t LEN>
+inline nsresult NS_NewNamedThread(
+ const char (&aName)[LEN], nsIThread** aResult,
+ already_AddRefed<nsIRunnable> aInitialEvent,
+ nsIThreadManager::ThreadCreationOptions aOptions = {}) {
+  static_assert(LEN <= 16, "Thread name must be no more than 15 characters");
+ return NS_NewNamedThread(nsDependentCString(aName, LEN - 1), aResult,
+ std::move(aInitialEvent), aOptions);
+}
+
+template <size_t LEN>
+inline nsresult NS_NewNamedThread(
+ const char (&aName)[LEN], nsIThread** aResult,
+ nsIRunnable* aInitialEvent = nullptr,
+ nsIThreadManager::ThreadCreationOptions aOptions = {}) {
+ nsCOMPtr<nsIRunnable> event = aInitialEvent;
+  static_assert(LEN <= 16, "Thread name must be no more than 15 characters");
+ return NS_NewNamedThread(nsDependentCString(aName, LEN - 1), aResult,
+ event.forget(), aOptions);
+}
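+
+// A minimal usage sketch (the thread name, runnable name, and lambda body are
+// illustrative):
+//
+//   nsCOMPtr<nsIThread> thread;
+//   nsresult rv = NS_NewNamedThread("MyWorker", getter_AddRefs(thread));
+//   if (NS_SUCCEEDED(rv)) {
+//     thread->Dispatch(
+//         NS_NewRunnableFunction("MyModule::Work", [] { /* do work */ }),
+//         NS_DISPATCH_NORMAL);
+//     // Shut the thread down when done with it to release its resources.
+//     thread->Shutdown();
+//   }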
+
+/**
+ * Get a reference to the current thread, creating it if it does not exist yet.
+ *
+ * @param aResult
+ * The resulting nsIThread object.
+ */
+extern nsresult NS_GetCurrentThread(nsIThread** aResult);
+
+/**
+ * Dispatch the given event to the current thread.
+ *
+ * @param aEvent
+ * The event to dispatch.
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ */
+extern nsresult NS_DispatchToCurrentThread(nsIRunnable* aEvent);
+extern nsresult NS_DispatchToCurrentThread(
+ already_AddRefed<nsIRunnable>&& aEvent);
+
+/**
+ * Dispatch the given event to the main thread.
+ *
+ * @param aEvent
+ * The event to dispatch.
+ * @param aDispatchFlags
+ * The flags to pass to the main thread's dispatch method.
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ */
+extern nsresult NS_DispatchToMainThread(
+ nsIRunnable* aEvent, uint32_t aDispatchFlags = NS_DISPATCH_NORMAL);
+extern nsresult NS_DispatchToMainThread(
+ already_AddRefed<nsIRunnable>&& aEvent,
+ uint32_t aDispatchFlags = NS_DISPATCH_NORMAL);
+
+extern nsresult NS_DelayedDispatchToCurrentThread(
+ already_AddRefed<nsIRunnable>&& aEvent, uint32_t aDelayMs);
+
+/**
+ * Dispatch the given event to the specified queue of the current thread.
+ *
+ * @param aEvent The event to dispatch.
+ * @param aQueue The event queue for the thread to use
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ * @returns NS_ERROR_UNEXPECTED
+ * If the thread is shutting down.
+ */
+extern nsresult NS_DispatchToCurrentThreadQueue(
+ already_AddRefed<nsIRunnable>&& aEvent, mozilla::EventQueuePriority aQueue);
+
+/**
+ * Dispatch the given event to the specified queue of the main thread.
+ *
+ * @param aEvent The event to dispatch.
+ * @param aQueue The event queue for the thread to use
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ * @returns NS_ERROR_UNEXPECTED
+ * If the thread is shutting down.
+ */
+extern nsresult NS_DispatchToMainThreadQueue(
+ already_AddRefed<nsIRunnable>&& aEvent, mozilla::EventQueuePriority aQueue);
+
+/**
+ * Dispatch the given event to an idle queue of the current thread.
+ *
+ * @param aEvent The event to dispatch. If the event implements
+ * nsIIdleRunnable, it will receive a call on
+ * nsIIdleRunnable::SetTimer when dispatched, with the value of
+ * aTimeout.
+ *
+ * @param aTimeout The time in milliseconds until the event should be
+ * moved from an idle queue to the regular queue, if it hasn't been
+ *   executed. If aEvent is also an nsIIdleRunnable, it is expected to
+ *   handle the timeout itself, after a call to
+ *   nsIIdleRunnable::SetTimer.
+ *
+ * @param aQueue
+ * The event queue for the thread to use. Must be an idle queue
+ * (Idle or DeferredTimers)
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ * @returns NS_ERROR_UNEXPECTED
+ * If the thread is shutting down.
+ */
+extern nsresult NS_DispatchToCurrentThreadQueue(
+ already_AddRefed<nsIRunnable>&& aEvent, uint32_t aTimeout,
+ mozilla::EventQueuePriority aQueue);
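+
+// A minimal usage sketch (the runnable name and lambda are illustrative):
+// dispatch to the current thread's idle queue, falling back to the regular
+// queue if the event has not run after 1000 milliseconds:
+//
+//   NS_DispatchToCurrentThreadQueue(
+//       NS_NewRunnableFunction("MyModule::IdleWork", [] { /* ... */ }),
+//       1000, mozilla::EventQueuePriority::Idle);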
+
+/**
+ * Dispatch the given event to a queue of a thread.
+ *
+ * @param aEvent The event to dispatch.
+ * @param aThread The target thread for the dispatch.
+ * @param aQueue The event queue for the thread to use.
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ * @returns NS_ERROR_UNEXPECTED
+ * If the thread is shutting down.
+ */
+extern nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
+ nsIThread* aThread,
+ mozilla::EventQueuePriority aQueue);
+
+/**
+ * Dispatch the given event to an idle queue of a thread.
+ *
+ * @param aEvent The event to dispatch. If the event implements
+ * nsIIdleRunnable, it will receive a call on
+ * nsIIdleRunnable::SetTimer when dispatched, with the value of
+ * aTimeout.
+ *
+ * @param aTimeout The time in milliseconds until the event should be
+ * moved from an idle queue to the regular queue, if it hasn't been
+ *   executed. If aEvent is also an nsIIdleRunnable, it is expected to
+ *   handle the timeout itself, after a call to
+ *   nsIIdleRunnable::SetTimer.
+ *
+ * @param aThread The target thread for the dispatch.
+ *
+ * @param aQueue
+ * The event queue for the thread to use. Must be an idle queue
+ * (Idle or DeferredTimers)
+ *
+ * @returns NS_ERROR_INVALID_ARG
+ * If event is null.
+ * @returns NS_ERROR_UNEXPECTED
+ * If the thread is shutting down.
+ */
+extern nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
+ uint32_t aTimeout, nsIThread* aThread,
+ mozilla::EventQueuePriority aQueue);
+
+#ifndef XPCOM_GLUE_AVOID_NSPR
+/**
+ * Process all pending events for the given thread before returning. This
+ * method simply calls ProcessNextEvent on the thread while HasPendingEvents
+ * continues to return true and the time spent in NS_ProcessPendingEvents
+ * does not exceed the given timeout value.
+ *
+ * @param aThread
+ * The thread object for which to process pending events. If null, then
+ * events will be processed for the current thread.
+ * @param aTimeout
+ * The maximum number of milliseconds to spend processing pending events.
+ * Events are not pre-empted to honor this timeout. Rather, the timeout
+ * value is simply used to determine whether or not to process another event.
+ * Pass PR_INTERVAL_NO_TIMEOUT to specify no timeout.
+ */
+extern nsresult NS_ProcessPendingEvents(
+ nsIThread* aThread, PRIntervalTime aTimeout = PR_INTERVAL_NO_TIMEOUT);
+#endif
+
+/**
+ * Shortcut for nsIThread::HasPendingEvents.
+ *
+ * It is an error to call this function when the given thread is not the
+ * current thread. This function will return false if called from some
+ * other thread.
+ *
+ * @param aThread
+ * The current thread or null.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that there are pending events
+ * in the current thread's event queue.
+ */
+extern bool NS_HasPendingEvents(nsIThread* aThread = nullptr);
+
+/**
+ * Shortcut for nsIThread::ProcessNextEvent.
+ *
+ * It is an error to call this function when the given thread is not the
+ * current thread. This function will simply return false if called
+ * from some other thread.
+ *
+ * @param aThread
+ * The current thread or null.
+ * @param aMayWait
+ * A boolean parameter that if "true" indicates that the method may block
+ * the calling thread to wait for a pending event.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that an event from the current
+ * thread's event queue was processed.
+ */
+extern bool NS_ProcessNextEvent(nsIThread* aThread = nullptr,
+ bool aMayWait = true);
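+
+// A minimal usage sketch: drain events already queued on the current thread
+// without blocking (equivalent in spirit to NS_ProcessPendingEvents above):
+//
+//   while (NS_HasPendingEvents()) {
+//     NS_ProcessNextEvent(nullptr, /* aMayWait */ false);
+//   }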
+
+/**
+ * Returns true if we're in the compositor thread.
+ *
+ * We declare this here because the headers required to invoke
+ * CompositorThreadHolder::IsInCompositorThread() also pull in a bunch of system
+ * headers that #define various tokens in a way that can break the build.
+ */
+extern bool NS_IsInCompositorThread();
+
+extern bool NS_IsInCanvasThreadOrWorker();
+
+extern bool NS_IsInVRThread();
+
+//-----------------------------------------------------------------------------
+// Helpers that work with nsCOMPtr:
+
+inline already_AddRefed<nsIThread> do_GetCurrentThread() {
+ nsIThread* thread = nullptr;
+ NS_GetCurrentThread(&thread);
+ return already_AddRefed<nsIThread>(thread);
+}
+
+inline already_AddRefed<nsIThread> do_GetMainThread() {
+ nsIThread* thread = nullptr;
+ NS_GetMainThread(&thread);
+ return already_AddRefed<nsIThread>(thread);
+}
+
+//-----------------------------------------------------------------------------
+
+// Fast access to the current thread. Will create an nsIThread if one does not
+// exist already! Do not release the returned pointer! If you want to use this
+// pointer from some other thread, then you will need to AddRef it. Otherwise,
+// you should only consider this pointer valid from code running on the current
+// thread.
+extern nsIThread* NS_GetCurrentThread();
+
+// Exactly the same as NS_GetCurrentThread, except it will not create an
+// nsThread if one does not exist yet. This is useful in cases where you have
+// code that runs on threads that may or may not be driven by an nsThread
+// event loop, and wish to avoid inadvertently creating a superfluous nsThread.
+extern nsIThread* NS_GetCurrentThreadNoCreate();
+
+/**
+ * Set the name of the current thread. Prefer this function over
+ * PR_SetCurrentThreadName() if possible. The name will also be included in the
+ * crash report.
+ *
+ * @param aName
+ * Name of the thread. A C language null-terminated string.
+ */
+extern void NS_SetCurrentThreadName(const char* aName);
+
+//-----------------------------------------------------------------------------
+
+#ifndef XPCOM_GLUE_AVOID_NSPR
+
+namespace mozilla {
+
+// This class is designed to be subclassed.
+class IdlePeriod : public nsIIdlePeriod {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIIDLEPERIOD
+
+ IdlePeriod() = default;
+
+ protected:
+ virtual ~IdlePeriod() = default;
+
+ private:
+ IdlePeriod(const IdlePeriod&) = delete;
+ IdlePeriod& operator=(const IdlePeriod&) = delete;
+ IdlePeriod& operator=(const IdlePeriod&&) = delete;
+};
+
+// Cancelable runnable methods implement nsICancelableRunnable; Idle and
+// IdleWithTimer ones also implement nsIIdleRunnable.
+enum class RunnableKind { Standard, Cancelable, Idle, IdleWithTimer };
+
+// Implementing nsINamed on Runnable bloats vtables for the hundreds of
+// Runnable subclasses that we have, so we want to avoid that overhead
+// when we're not using nsINamed for anything.
+# ifndef RELEASE_OR_BETA
+# define MOZ_COLLECTING_RUNNABLE_TELEMETRY
+# endif
+
+// This class is designed to be subclassed.
+class Runnable : public nsIRunnable
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ ,
+ public nsINamed
+# endif
+{
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIRUNNABLE
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_DECL_NSINAMED
+# endif
+
+ Runnable() = delete;
+
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ explicit Runnable(const char* aName) : mName(aName) {}
+# else
+ explicit Runnable(const char* aName) {}
+# endif
+
+ protected:
+ virtual ~Runnable() = default;
+
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ const char* mName = nullptr;
+# endif
+
+ private:
+ Runnable(const Runnable&) = delete;
+ Runnable& operator=(const Runnable&) = delete;
+ Runnable& operator=(const Runnable&&) = delete;
+};
+
+// This is a base class for tasks that might not be run, such as those that may
+// be dispatched to workers.
+// The owner of an event target will call either Run() or OnDiscard()
+// exactly once.
+// Derived classes should override Run(). An OnDiscard() override may
+// provide cleanup when Run() will not be called.
+class DiscardableRunnable : public Runnable, public nsIDiscardableRunnable {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ // nsIDiscardableRunnable
+ void OnDiscard() override {}
+
+ DiscardableRunnable() = delete;
+ explicit DiscardableRunnable(const char* aName) : Runnable(aName) {}
+
+ protected:
+ virtual ~DiscardableRunnable() = default;
+
+ private:
+ DiscardableRunnable(const DiscardableRunnable&) = delete;
+ DiscardableRunnable& operator=(const DiscardableRunnable&) = delete;
+ DiscardableRunnable& operator=(const DiscardableRunnable&&) = delete;
+};
+
+// This class is designed to be subclassed.
+// Derived classes should override Run() and Cancel() to provide that
+// calling Run() after Cancel() is a no-op.
+class CancelableRunnable : public DiscardableRunnable,
+ public nsICancelableRunnable {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ // nsIDiscardableRunnable
+ void OnDiscard() override;
+ // nsICancelableRunnable
+ virtual nsresult Cancel() override = 0;
+
+ CancelableRunnable() = delete;
+ explicit CancelableRunnable(const char* aName) : DiscardableRunnable(aName) {}
+
+ protected:
+ virtual ~CancelableRunnable() = default;
+
+ private:
+ CancelableRunnable(const CancelableRunnable&) = delete;
+ CancelableRunnable& operator=(const CancelableRunnable&) = delete;
+ CancelableRunnable& operator=(const CancelableRunnable&&) = delete;
+};
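+
+// A minimal subclass sketch honoring the contract above (all names are
+// illustrative):
+//
+//   class MyCancelableTask final : public mozilla::CancelableRunnable {
+//    public:
+//     MyCancelableTask() : CancelableRunnable("MyCancelableTask") {}
+//     NS_IMETHOD Run() override {
+//       if (mCancelled) {
+//         return NS_OK;  // Run() after Cancel() must be a no-op.
+//       }
+//       /* do the work */
+//       return NS_OK;
+//     }
+//     nsresult Cancel() override {
+//       mCancelled = true;
+//       return NS_OK;
+//     }
+//    private:
+//     bool mCancelled = false;
+//   };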
+
+// This class is designed to be subclassed.
+class IdleRunnable : public DiscardableRunnable, public nsIIdleRunnable {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+
+ explicit IdleRunnable(const char* aName) : DiscardableRunnable(aName) {}
+
+ protected:
+ virtual ~IdleRunnable() = default;
+
+ private:
+ IdleRunnable(const IdleRunnable&) = delete;
+ IdleRunnable& operator=(const IdleRunnable&) = delete;
+ IdleRunnable& operator=(const IdleRunnable&&) = delete;
+};
+
+// This class is designed to be subclassed.
+class CancelableIdleRunnable : public CancelableRunnable,
+ public nsIIdleRunnable {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+
+ CancelableIdleRunnable() : CancelableRunnable("CancelableIdleRunnable") {}
+ explicit CancelableIdleRunnable(const char* aName)
+ : CancelableRunnable(aName) {}
+
+ protected:
+ virtual ~CancelableIdleRunnable() = default;
+
+ private:
+ CancelableIdleRunnable(const CancelableIdleRunnable&) = delete;
+ CancelableIdleRunnable& operator=(const CancelableIdleRunnable&) = delete;
+ CancelableIdleRunnable& operator=(const CancelableIdleRunnable&&) = delete;
+};
+
+// This class is designed to be a wrapper around a real runnable to support
+// event prioritization.
+class PrioritizableRunnable : public Runnable, public nsIRunnablePriority {
+ public:
+ PrioritizableRunnable(already_AddRefed<nsIRunnable>&& aRunnable,
+ uint32_t aPriority);
+
+# ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
+ NS_IMETHOD GetName(nsACString& aName) override;
+# endif
+
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIRUNNABLE
+ NS_DECL_NSIRUNNABLEPRIORITY
+
+ protected:
+ virtual ~PrioritizableRunnable() = default;
+
+ nsCOMPtr<nsIRunnable> mRunnable;
+ uint32_t mPriority;
+};
+
+class PrioritizableCancelableRunnable : public CancelableRunnable,
+ public nsIRunnablePriority {
+ public:
+ PrioritizableCancelableRunnable(uint32_t aPriority, const char* aName)
+ : CancelableRunnable(aName), mPriority(aPriority) {}
+
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_NSIRUNNABLEPRIORITY
+
+ protected:
+ virtual ~PrioritizableCancelableRunnable() = default;
+
+ const uint32_t mPriority;
+};
+
+extern already_AddRefed<nsIRunnable> CreateRenderBlockingRunnable(
+ already_AddRefed<nsIRunnable>&& aRunnable);
+
+namespace detail {
+
+// An event that can be used to call C++11 functions or function objects,
+// including lambdas. The function must have no required arguments, and must
+// return void.
+template <typename StoredFunction>
+class RunnableFunction : public Runnable {
+ public:
+ template <typename F>
+ explicit RunnableFunction(const char* aName, F&& aFunction)
+ : Runnable(aName), mFunction(std::forward<F>(aFunction)) {}
+
+ NS_IMETHOD Run() override {
+ static_assert(std::is_void_v<decltype(mFunction())>,
+ "The lambda must return void!");
+ mFunction();
+ return NS_OK;
+ }
+
+ private:
+ StoredFunction mFunction;
+};
+
+// Type alias for NS_NewRunnableFunction
+template <typename Function>
+using RunnableFunctionImpl =
+ // Make sure we store a non-reference in nsRunnableFunction.
+ typename detail::RunnableFunction<std::remove_reference_t<Function>>;
+} // namespace detail
+
+namespace detail {
+
+template <typename CVRemoved>
+struct IsRefcountedSmartPointerHelper : std::false_type {};
+
+template <typename Pointee>
+struct IsRefcountedSmartPointerHelper<RefPtr<Pointee>> : std::true_type {};
+
+template <typename Pointee>
+struct IsRefcountedSmartPointerHelper<nsCOMPtr<Pointee>> : std::true_type {};
+
+} // namespace detail
+
+template <typename T>
+struct IsRefcountedSmartPointer
+ : detail::IsRefcountedSmartPointerHelper<std::remove_cv_t<T>> {};
+
+namespace detail {
+
+template <typename T, typename CVRemoved>
+struct RemoveSmartPointerHelper {
+ typedef T Type;
+};
+
+template <typename T, typename Pointee>
+struct RemoveSmartPointerHelper<T, RefPtr<Pointee>> {
+ typedef Pointee Type;
+};
+
+template <typename T, typename Pointee>
+struct RemoveSmartPointerHelper<T, nsCOMPtr<Pointee>> {
+ typedef Pointee Type;
+};
+
+} // namespace detail
+
+template <typename T>
+struct RemoveSmartPointer
+ : detail::RemoveSmartPointerHelper<T, std::remove_cv_t<T>> {};
+
+namespace detail {
+
+template <typename T, typename CVRemoved>
+struct RemoveRawOrSmartPointerHelper {
+ typedef T Type;
+};
+
+template <typename T, typename Pointee>
+struct RemoveRawOrSmartPointerHelper<T, Pointee*> {
+ typedef Pointee Type;
+};
+
+template <typename T, typename Pointee>
+struct RemoveRawOrSmartPointerHelper<T, RefPtr<Pointee>> {
+ typedef Pointee Type;
+};
+
+template <typename T, typename Pointee>
+struct RemoveRawOrSmartPointerHelper<T, nsCOMPtr<Pointee>> {
+ typedef Pointee Type;
+};
+
+} // namespace detail
+
+template <typename T>
+struct RemoveRawOrSmartPointer
+ : detail::RemoveRawOrSmartPointerHelper<T, std::remove_cv_t<T>> {};
+
+} // namespace mozilla
+
+inline nsISupports* ToSupports(mozilla::Runnable* p) {
+ return static_cast<nsIRunnable*>(p);
+}
+
+template <typename Function>
+already_AddRefed<mozilla::Runnable> NS_NewRunnableFunction(
+ const char* aName, Function&& aFunction) {
+ // We store a non-reference in RunnableFunction, but still forward aFunction
+ // to move if possible.
+ return do_AddRef(new mozilla::detail::RunnableFunctionImpl<Function>(
+ aName, std::forward<Function>(aFunction)));
+}
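+
+// A minimal usage sketch (the name string and lambda are illustrative):
+//
+//   nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
+//       "MyModule::DoWork", [] { /* runs on the dispatch target */ });
+//   NS_DispatchToMainThread(task.forget());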
+
+// Creates a new object implementing nsIRunnable and nsICancelableRunnable,
+// which runs a given function on Run and clears the stored function object on a
+// call to `Cancel` (and thus destroys all objects it holds).
+template <typename Function>
+already_AddRefed<mozilla::CancelableRunnable> NS_NewCancelableRunnableFunction(
+ const char* aName, Function&& aFunc) {
+ class FuncCancelableRunnable final : public mozilla::CancelableRunnable {
+ public:
+ static_assert(
+ std::is_void_v<
+ decltype(std::declval<std::remove_reference_t<Function>>()())>);
+
+ NS_INLINE_DECL_REFCOUNTING_INHERITED(FuncCancelableRunnable,
+ CancelableRunnable)
+
+ explicit FuncCancelableRunnable(const char* aName, Function&& aFunc)
+ : CancelableRunnable{aName},
+ mFunc{mozilla::Some(std::forward<Function>(aFunc))} {}
+
+ NS_IMETHOD Run() override {
+ if (mFunc) {
+ (*mFunc)();
+ }
+
+ return NS_OK;
+ }
+
+ nsresult Cancel() override {
+ mFunc.reset();
+ return NS_OK;
+ }
+
+ private:
+ ~FuncCancelableRunnable() = default;
+
+ mozilla::Maybe<std::remove_reference_t<Function>> mFunc;
+ };
+
+ return mozilla::MakeAndAddRef<FuncCancelableRunnable>(
+ aName, std::forward<Function>(aFunc));
+}
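+
+// A minimal usage sketch (the target, name, and lambda are illustrative):
+// keep a reference to the runnable so it can be cancelled after dispatch:
+//
+//   RefPtr<mozilla::CancelableRunnable> task =
+//       NS_NewCancelableRunnableFunction("MyModule::MaybeWork",
+//                                        [] { /* ... */ });
+//   target->Dispatch(do_AddRef(task), NS_DISPATCH_NORMAL);
+//   // Later, on the target thread, if the work is no longer needed:
+//   task->Cancel();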
+
+namespace mozilla {
+namespace detail {
+
+template <RunnableKind Kind>
+class TimerBehaviour {
+ public:
+ nsITimer* GetTimer() { return nullptr; }
+ void CancelTimer() {}
+
+ protected:
+ ~TimerBehaviour() = default;
+};
+
+template <>
+class TimerBehaviour<RunnableKind::IdleWithTimer> {
+ public:
+ nsITimer* GetTimer() {
+ if (!mTimer) {
+ mTimer = NS_NewTimer();
+ }
+
+ return mTimer;
+ }
+
+ void CancelTimer() {
+ if (mTimer) {
+ mTimer->Cancel();
+ }
+ }
+
+ protected:
+ ~TimerBehaviour() { CancelTimer(); }
+
+ private:
+ nsCOMPtr<nsITimer> mTimer;
+};
+
+} // namespace detail
+} // namespace mozilla
+
+// An event that can be used to call a method on a class. The class type must
+// support reference counting. This event supports Revoke for use
+// with nsRevocableEventPtr.
+template <class ClassType, typename ReturnType = void, bool Owning = true,
+ mozilla::RunnableKind Kind = mozilla::RunnableKind::Standard>
+class nsRunnableMethod
+ : public std::conditional_t<
+ Kind == mozilla::RunnableKind::Standard, mozilla::Runnable,
+ std::conditional_t<Kind == mozilla::RunnableKind::Cancelable,
+ mozilla::CancelableRunnable,
+ mozilla::CancelableIdleRunnable>>,
+ protected mozilla::detail::TimerBehaviour<Kind> {
+ using BaseType = std::conditional_t<
+ Kind == mozilla::RunnableKind::Standard, mozilla::Runnable,
+ std::conditional_t<Kind == mozilla::RunnableKind::Cancelable,
+ mozilla::CancelableRunnable,
+ mozilla::CancelableIdleRunnable>>;
+
+ public:
+ nsRunnableMethod(const char* aName) : BaseType(aName) {}
+
+ virtual void Revoke() = 0;
+
+ // These ReturnTypeEnforcer classes disallow return types that
+  // we know are not safe. The default ReturnTypeEnforcer compiles just fine,
+  // but the already_AddRefed specialization will not.
+ template <typename OtherReturnType>
+ class ReturnTypeEnforcer {
+ public:
+ typedef int ReturnTypeIsSafe;
+ };
+
+ template <class T>
+ class ReturnTypeEnforcer<already_AddRefed<T>> {
+ // No ReturnTypeIsSafe makes this illegal!
+ };
+
+ // Make sure this return type is safe.
+ typedef typename ReturnTypeEnforcer<ReturnType>::ReturnTypeIsSafe check;
+};
+
+template <class ClassType, bool Owning>
+struct nsRunnableMethodReceiver {
+ RefPtr<ClassType> mObj;
+ explicit nsRunnableMethodReceiver(ClassType* aObj) : mObj(aObj) {}
+ explicit nsRunnableMethodReceiver(RefPtr<ClassType>&& aObj)
+ : mObj(std::move(aObj)) {}
+ ~nsRunnableMethodReceiver() { Revoke(); }
+ ClassType* Get() const { return mObj.get(); }
+ void Revoke() { mObj = nullptr; }
+};
+
+template <class ClassType>
+struct nsRunnableMethodReceiver<ClassType, false> {
+ ClassType* MOZ_NON_OWNING_REF mObj;
+ explicit nsRunnableMethodReceiver(ClassType* aObj) : mObj(aObj) {}
+ ClassType* Get() const { return mObj; }
+ void Revoke() { mObj = nullptr; }
+};
+
+static inline constexpr bool IsIdle(mozilla::RunnableKind aKind) {
+ return aKind == mozilla::RunnableKind::Idle ||
+ aKind == mozilla::RunnableKind::IdleWithTimer;
+}
+
+template <typename PtrType, typename Method, bool Owning,
+ mozilla::RunnableKind Kind>
+struct nsRunnableMethodTraits;
+
+template <typename PtrType, class C, typename R, bool Owning,
+ mozilla::RunnableKind Kind, typename... As>
+struct nsRunnableMethodTraits<PtrType, R (C::*)(As...), Owning, Kind> {
+ typedef typename mozilla::RemoveRawOrSmartPointer<PtrType>::Type class_type;
+ static_assert(std::is_base_of<C, class_type>::value,
+ "Stored class must inherit from method's class");
+ typedef R return_type;
+ typedef nsRunnableMethod<C, R, Owning, Kind> base_type;
+ static const bool can_cancel = Kind == mozilla::RunnableKind::Cancelable;
+};
+
+template <typename PtrType, class C, typename R, bool Owning,
+ mozilla::RunnableKind Kind, typename... As>
+struct nsRunnableMethodTraits<PtrType, R (C::*)(As...) const, Owning, Kind> {
+ typedef const typename mozilla::RemoveRawOrSmartPointer<PtrType>::Type
+ class_type;
+ static_assert(std::is_base_of<C, class_type>::value,
+ "Stored class must inherit from method's class");
+ typedef R return_type;
+ typedef nsRunnableMethod<C, R, Owning, Kind> base_type;
+ static const bool can_cancel = Kind == mozilla::RunnableKind::Cancelable;
+};
+
+# ifdef NS_HAVE_STDCALL
+template <typename PtrType, class C, typename R, bool Owning,
+ mozilla::RunnableKind Kind, typename... As>
+struct nsRunnableMethodTraits<PtrType, R (__stdcall C::*)(As...), Owning,
+ Kind> {
+ typedef typename mozilla::RemoveRawOrSmartPointer<PtrType>::Type class_type;
+ static_assert(std::is_base_of<C, class_type>::value,
+ "Stored class must inherit from method's class");
+ typedef R return_type;
+ typedef nsRunnableMethod<C, R, Owning, Kind> base_type;
+ static const bool can_cancel = Kind == mozilla::RunnableKind::Cancelable;
+};
+
+template <typename PtrType, class C, typename R, bool Owning,
+ mozilla::RunnableKind Kind>
+struct nsRunnableMethodTraits<PtrType, R (NS_STDCALL C::*)(), Owning, Kind> {
+ typedef typename mozilla::RemoveRawOrSmartPointer<PtrType>::Type class_type;
+ static_assert(std::is_base_of<C, class_type>::value,
+ "Stored class must inherit from method's class");
+ typedef R return_type;
+ typedef nsRunnableMethod<C, R, Owning, Kind> base_type;
+ static const bool can_cancel = Kind == mozilla::RunnableKind::Cancelable;
+};
+
+template <typename PtrType, class C, typename R, bool Owning,
+ mozilla::RunnableKind Kind, typename... As>
+struct nsRunnableMethodTraits<PtrType, R (__stdcall C::*)(As...) const, Owning,
+ Kind> {
+ typedef const typename mozilla::RemoveRawOrSmartPointer<PtrType>::Type
+ class_type;
+ static_assert(std::is_base_of<C, class_type>::value,
+ "Stored class must inherit from method's class");
+ typedef R return_type;
+ typedef nsRunnableMethod<C, R, Owning, Kind> base_type;
+ static const bool can_cancel = Kind == mozilla::RunnableKind::Cancelable;
+};
+
+template <typename PtrType, class C, typename R, bool Owning,
+ mozilla::RunnableKind Kind>
+struct nsRunnableMethodTraits<PtrType, R (NS_STDCALL C::*)() const, Owning,
+ Kind> {
+ typedef const typename mozilla::RemoveRawOrSmartPointer<PtrType>::Type
+ class_type;
+ static_assert(std::is_base_of<C, class_type>::value,
+ "Stored class must inherit from method's class");
+ typedef R return_type;
+ typedef nsRunnableMethod<C, R, Owning, Kind> base_type;
+ static const bool can_cancel = Kind == mozilla::RunnableKind::Cancelable;
+};
+# endif
+
+// IsParameterStorageClass<T>::value is true if T is a parameter-storage class
+// that will be recognized by NS_New[NonOwning]RunnableMethodWithArg[s] to
+// force a specific storage&passing strategy (instead of inferring one,
+// see ParameterStorage).
+// When creating a new storage class, add a specialization for it to be
+// recognized.
+template <typename T>
+struct IsParameterStorageClass : public std::false_type {};
+
+// StoreXPassByY structs are used to inform nsRunnableMethodArguments how to
+// store arguments, and how to pass them to the target method.
+
+template <typename T>
+struct StoreCopyPassByValue {
+ using stored_type = std::decay_t<T>;
+ typedef stored_type passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreCopyPassByValue(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreCopyPassByValue<S>>
+ : public std::true_type {};
+
+template <typename T>
+struct StoreCopyPassByConstLRef {
+ using stored_type = std::decay_t<T>;
+ typedef const stored_type& passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreCopyPassByConstLRef(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreCopyPassByConstLRef<S>>
+ : public std::true_type {};
+
+template <typename T>
+struct StoreCopyPassByLRef {
+ using stored_type = std::decay_t<T>;
+ typedef stored_type& passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreCopyPassByLRef(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreCopyPassByLRef<S>> : public std::true_type {
+};
+
+template <typename T>
+struct StoreCopyPassByRRef {
+ using stored_type = std::decay_t<T>;
+ typedef stored_type&& passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreCopyPassByRRef(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return std::move(m); }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreCopyPassByRRef<S>> : public std::true_type {
+};
+
+template <typename T>
+struct StoreRefPassByLRef {
+ typedef T& stored_type;
+ typedef T& passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreRefPassByLRef(A& a) : m(a) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreRefPassByLRef<S>> : public std::true_type {
+};
+
+template <typename T>
+struct StoreConstRefPassByConstLRef {
+ typedef const T& stored_type;
+ typedef const T& passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreConstRefPassByConstLRef(const A& a) : m(a) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreConstRefPassByConstLRef<S>>
+ : public std::true_type {};
+
+template <typename T>
+struct StoreRefPtrPassByPtr {
+ typedef RefPtr<T> stored_type;
+ typedef T* passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreRefPtrPassByPtr(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return m.get(); }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreRefPtrPassByPtr<S>>
+ : public std::true_type {};
+
+template <typename T>
+struct StorePtrPassByPtr {
+ typedef T* stored_type;
+ typedef T* passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StorePtrPassByPtr(A a) : m(a) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StorePtrPassByPtr<S>> : public std::true_type {};
+
+template <typename T>
+struct StoreConstPtrPassByConstPtr {
+ typedef const T* stored_type;
+ typedef const T* passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreConstPtrPassByConstPtr(A a) : m(a) {}
+ passed_type PassAsParameter() { return m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreConstPtrPassByConstPtr<S>>
+ : public std::true_type {};
+
+template <typename T>
+struct StoreCopyPassByConstPtr {
+ typedef T stored_type;
+ typedef const T* passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreCopyPassByConstPtr(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return &m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreCopyPassByConstPtr<S>>
+ : public std::true_type {};
+
+template <typename T>
+struct StoreCopyPassByPtr {
+ typedef T stored_type;
+ typedef T* passed_type;
+ stored_type m;
+ template <typename A>
+ MOZ_IMPLICIT StoreCopyPassByPtr(A&& a) : m(std::forward<A>(a)) {}
+ passed_type PassAsParameter() { return &m; }
+};
+template <typename S>
+struct IsParameterStorageClass<StoreCopyPassByPtr<S>> : public std::true_type {
+};
+
+namespace detail {
+
+template <typename>
+struct SFINAE1True : std::true_type {};
+
+template <class T>
+static auto HasRefCountMethodsTest(int)
+ -> SFINAE1True<decltype(std::declval<T>().AddRef(),
+ std::declval<T>().Release())>;
+template <class>
+static auto HasRefCountMethodsTest(long) -> std::false_type;
+
+template <class T>
+struct HasRefCountMethods : decltype(HasRefCountMethodsTest<T>(0)) {};
+
+template <typename TWithoutPointer>
+struct NonnsISupportsPointerStorageClass
+ : std::conditional<
+ std::is_const_v<TWithoutPointer>,
+ StoreConstPtrPassByConstPtr<std::remove_const_t<TWithoutPointer>>,
+ StorePtrPassByPtr<TWithoutPointer>> {
+ using Type = typename NonnsISupportsPointerStorageClass::conditional::type;
+};
+
+template <typename TWithoutPointer>
+struct PointerStorageClass
+ : std::conditional<
+ HasRefCountMethods<TWithoutPointer>::value,
+ StoreRefPtrPassByPtr<TWithoutPointer>,
+ typename NonnsISupportsPointerStorageClass<TWithoutPointer>::Type> {
+ using Type = typename PointerStorageClass::conditional::type;
+};
+
+template <typename TWithoutRef>
+struct LValueReferenceStorageClass
+ : std::conditional<
+ std::is_const_v<TWithoutRef>,
+ StoreConstRefPassByConstLRef<std::remove_const_t<TWithoutRef>>,
+ StoreRefPassByLRef<TWithoutRef>> {
+ using Type = typename LValueReferenceStorageClass::conditional::type;
+};
+
+template <typename T>
+struct SmartPointerStorageClass
+ : std::conditional<
+ mozilla::IsRefcountedSmartPointer<T>::value,
+ StoreRefPtrPassByPtr<typename mozilla::RemoveSmartPointer<T>::Type>,
+ StoreCopyPassByConstLRef<T>> {
+ using Type = typename SmartPointerStorageClass::conditional::type;
+};
+
+template <typename T>
+struct NonLValueReferenceStorageClass
+ : std::conditional<std::is_rvalue_reference_v<T>,
+ StoreCopyPassByRRef<std::remove_reference_t<T>>,
+ typename SmartPointerStorageClass<T>::Type> {
+ using Type = typename NonLValueReferenceStorageClass::conditional::type;
+};
+
+template <typename T>
+struct NonPointerStorageClass
+ : std::conditional<std::is_lvalue_reference_v<T>,
+ typename LValueReferenceStorageClass<
+ std::remove_reference_t<T>>::Type,
+ typename NonLValueReferenceStorageClass<T>::Type> {
+ using Type = typename NonPointerStorageClass::conditional::type;
+};
+
+template <typename T>
+struct NonParameterStorageClass
+ : std::conditional<
+ std::is_pointer_v<T>,
+ typename PointerStorageClass<std::remove_pointer_t<T>>::Type,
+ typename NonPointerStorageClass<T>::Type> {
+ using Type = typename NonParameterStorageClass::conditional::type;
+};
+
+// Choose storage&passing strategy based on preferred storage type:
+// - If IsParameterStorageClass<T>::value is true, use as-is.
+// - RC* -> StoreRefPtrPassByPtr<RC> :Store RefPtr<RC>, pass RC*
+// ^^ RC quacks like a ref-counted type (i.e., has AddRef and Release methods)
+// - const T* -> StoreConstPtrPassByConstPtr<T> :Store const T*, pass const T*
+// - T* -> StorePtrPassByPtr<T> :Store T*, pass T*.
+// - const T& -> StoreConstRefPassByConstLRef<T>:Store const T&, pass const T&.
+// - T& -> StoreRefPassByLRef<T> :Store T&, pass T&.
+// - T&& -> StoreCopyPassByRRef<T> :Store T, pass std::move(T).
+// - RefPtr<T>, nsCOMPtr<T>
+// -> StoreRefPtrPassByPtr<T> :Store RefPtr<T>, pass T*
+// - Other T -> StoreCopyPassByConstLRef<T> :Store T, pass const T&.
+// Other available explicit options:
+// - StoreCopyPassByValue<T> :Store T, pass T.
+// - StoreCopyPassByLRef<T> :Store T, pass T& (of copy!)
+// - StoreCopyPassByConstPtr<T> :Store T, pass const T*
+// - StoreCopyPassByPtr<T> :Store T, pass T* (of copy!)
+// Or create your own class with PassAsParameter() method, optional
+// clean-up in destructor, and with associated IsParameterStorageClass<>.
+template <typename T>
+struct ParameterStorage
+ : std::conditional<IsParameterStorageClass<T>::value, T,
+ typename NonParameterStorageClass<T>::Type> {
+ using Type = typename ParameterStorage::conditional::type;
+};
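+
+// A minimal sketch of forcing a strategy explicitly (the class, method, and
+// variable names are illustrative): store a copy of an array in the runnable,
+// but pass it to the method as an rvalue reference:
+//
+//   nsTArray<int> array = ...;
+//   nsCOMPtr<nsIRunnable> event =
+//       mozilla::NewRunnableMethod<StoreCopyPassByRRef<nsTArray<int>>>(
+//           "MyClass::TakeArray", myObject, &MyClass::TakeArray,
+//           std::move(array));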
+
+template <class T>
+static auto HasSetDeadlineTest(int)
+ -> SFINAE1True<decltype(std::declval<T>().SetDeadline(
+ std::declval<mozilla::TimeStamp>()))>;
+
+template <class T>
+static auto HasSetDeadlineTest(long) -> std::false_type;
+
+template <class T>
+struct HasSetDeadline : decltype(HasSetDeadlineTest<T>(0)) {};
+
+template <class T>
+std::enable_if_t<::detail::HasSetDeadline<T>::value> SetDeadlineImpl(
+ T* aObj, mozilla::TimeStamp aTimeStamp) {
+ aObj->SetDeadline(aTimeStamp);
+}
+
+template <class T>
+std::enable_if_t<!::detail::HasSetDeadline<T>::value> SetDeadlineImpl(
+ T* aObj, mozilla::TimeStamp aTimeStamp) {}
+} /* namespace detail */
+
+namespace mozilla {
+namespace detail {
+
+// struct used to store arguments and later apply them to a method.
+template <typename... Ts>
+struct RunnableMethodArguments final {
+ std::tuple<typename ::detail::ParameterStorage<Ts>::Type...> mArguments;
+ template <typename... As>
+ explicit RunnableMethodArguments(As&&... aArguments)
+ : mArguments(std::forward<As>(aArguments)...) {}
+ template <class C, typename M>
+ decltype(auto) apply(C* o, M m) {
+ return std::apply(
+ [&o, m](auto&&... args) {
+ return ((*o).*m)(args.PassAsParameter()...);
+ },
+ mArguments);
+ }
+};
+
+template <typename PtrType, typename Method, bool Owning, RunnableKind Kind,
+ typename... Storages>
+class RunnableMethodImpl final
+ : public ::nsRunnableMethodTraits<PtrType, Method, Owning,
+ Kind>::base_type {
+ typedef typename ::nsRunnableMethodTraits<PtrType, Method, Owning, Kind>
+ Traits;
+
+ typedef typename Traits::class_type ClassType;
+ typedef typename Traits::base_type BaseType;
+ ::nsRunnableMethodReceiver<ClassType, Owning> mReceiver;
+ Method mMethod;
+ RunnableMethodArguments<Storages...> mArgs;
+ using BaseType::CancelTimer;
+ using BaseType::GetTimer;
+
+ private:
+  virtual ~RunnableMethodImpl() { Revoke(); }
+ static void TimedOut(nsITimer* aTimer, void* aClosure) {
+ static_assert(IsIdle(Kind), "Don't use me!");
+ RefPtr<CancelableIdleRunnable> r =
+ static_cast<CancelableIdleRunnable*>(aClosure);
+ r->SetDeadline(TimeStamp());
+ r->Run();
+ r->Cancel();
+ }
+
+ public:
+ template <typename ForwardedPtrType, typename... Args>
+ explicit RunnableMethodImpl(const char* aName, ForwardedPtrType&& aObj,
+ Method aMethod, Args&&... aArgs)
+ : BaseType(aName),
+ mReceiver(std::forward<ForwardedPtrType>(aObj)),
+ mMethod(aMethod),
+ mArgs(std::forward<Args>(aArgs)...) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "Storages and Args should have equal sizes");
+ }
+
+ NS_IMETHOD Run() {
+ CancelTimer();
+
+ if (MOZ_LIKELY(mReceiver.Get())) {
+ mArgs.apply(mReceiver.Get(), mMethod);
+ }
+
+ return NS_OK;
+ }
+
+ nsresult Cancel() {
+ static_assert(Kind >= RunnableKind::Cancelable, "Don't use me!");
+ Revoke();
+ return NS_OK;
+ }
+
+ void Revoke() {
+ CancelTimer();
+ mReceiver.Revoke();
+ }
+
+ void SetDeadline(TimeStamp aDeadline) {
+ if (MOZ_LIKELY(mReceiver.Get())) {
+ ::detail::SetDeadlineImpl(mReceiver.Get(), aDeadline);
+ }
+ }
+
+ void SetTimer(uint32_t aDelay, nsIEventTarget* aTarget) {
+ MOZ_ASSERT(aTarget);
+
+ if (nsCOMPtr<nsITimer> timer = GetTimer()) {
+ timer->Cancel();
+ timer->SetTarget(aTarget);
+ timer->InitWithNamedFuncCallback(TimedOut, this, aDelay,
+ nsITimer::TYPE_ONE_SHOT,
+ "detail::RunnableMethodImpl::SetTimer");
+ }
+ }
+};
+
+// Type aliases for NewRunnableMethod.
+template <typename PtrType, typename Method>
+using OwningRunnableMethod =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ true, RunnableKind::Standard>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using OwningRunnableMethodImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, true,
+ RunnableKind::Standard, Storages...>;
+
+// Type aliases for NewCancelableRunnableMethod.
+template <typename PtrType, typename Method>
+using CancelableRunnableMethod =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ true,
+ RunnableKind::Cancelable>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using CancelableRunnableMethodImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, true,
+ RunnableKind::Cancelable, Storages...>;
+
+// Type aliases for NewIdleRunnableMethod.
+template <typename PtrType, typename Method>
+using IdleRunnableMethod =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ true, RunnableKind::Idle>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using IdleRunnableMethodImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, true,
+ RunnableKind::Idle, Storages...>;
+
+// Type aliases for NewIdleRunnableMethodWithTimer.
+template <typename PtrType, typename Method>
+using IdleRunnableMethodWithTimer =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ true,
+ RunnableKind::IdleWithTimer>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using IdleRunnableMethodWithTimerImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, true,
+ RunnableKind::IdleWithTimer, Storages...>;
+
+// Type aliases for NewNonOwningRunnableMethod.
+template <typename PtrType, typename Method>
+using NonOwningRunnableMethod =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ false, RunnableKind::Standard>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using NonOwningRunnableMethodImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, false,
+ RunnableKind::Standard, Storages...>;
+
+// Type aliases for NonOwningCancelableRunnableMethod
+template <typename PtrType, typename Method>
+using NonOwningCancelableRunnableMethod =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ false,
+ RunnableKind::Cancelable>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using NonOwningCancelableRunnableMethodImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, false,
+ RunnableKind::Cancelable, Storages...>;
+
+// Type aliases for NonOwningIdleRunnableMethod
+template <typename PtrType, typename Method>
+using NonOwningIdleRunnableMethod =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ false, RunnableKind::Idle>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using NonOwningIdleRunnableMethodImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, false,
+ RunnableKind::Idle, Storages...>;
+
+// Type aliases for NewIdleRunnableMethodWithTimer.
+template <typename PtrType, typename Method>
+using NonOwningIdleRunnableMethodWithTimer =
+ typename ::nsRunnableMethodTraits<std::remove_reference_t<PtrType>, Method,
+ false,
+ RunnableKind::IdleWithTimer>::base_type;
+template <typename PtrType, typename Method, typename... Storages>
+using NonOwningIdleRunnableMethodWithTimerImpl =
+ RunnableMethodImpl<std::remove_reference_t<PtrType>, Method, false,
+ RunnableKind::IdleWithTimer, Storages...>;
+
+} // namespace detail
+
+// NewRunnableMethod and friends
+//
+// Very often in Gecko, you'll find yourself in a situation where you want
+// to invoke a method (with or without arguments) asynchronously. You
+// could write a small helper class inheriting from nsRunnable to handle
+// all these details, or you could let NewRunnableMethod take care of all
+// those details for you.
+//
+// The simplest use of NewRunnableMethod looks like:
+//
+// nsCOMPtr<nsIRunnable> event =
+// mozilla::NewRunnableMethod("description", myObject,
+// &MyClass::HandleEvent);
+// NS_DispatchToCurrentThread(event);
+//
+// Statically enforced constraints:
+// - myObject must be of (or implicitly convertible to) type MyClass
+// - MyClass must define AddRef and Release methods
+//
+// The "description" string should specify a human-readable name for the
+// runnable; the provided string is used by various introspection tools
+// in the browser.
+//
+// The created runnable will take a strong reference to `myObject`. For
+// non-refcounted objects, or refcounted objects with unusual refcounting
+// requirements, and if and only if you are 110% certain that `myObject`
+// will live long enough, you can use NewNonOwningRunnableMethod instead,
+// which will, as its name implies, take a non-owning reference. If you
+// find yourself having to use this function, you should accompany your use
+// with a proof comment describing why the runnable will not lead to
+// use-after-frees.
+//
+// (If you find yourself writing contorted code to Release() an object
+// asynchronously on a different thread, you should use the
+// NS_ProxyRelease function.)
+//
+// Invoking a method with arguments takes a little more care. The
+// natural extension of the above:
+//
+// nsCOMPtr<nsIRunnable> event =
+// mozilla::NewRunnableMethod("description", myObject,
+// &MyClass::HandleEvent,
+// arg1, arg2, ...);
+//
+// can lead to security hazards (e.g. passing in raw pointers to refcounted
+// objects and storing those raw pointers in the runnable). We therefore
+// require you to specify the storage types used by the runnable, just as
+// you would if you were writing out the class by hand:
+//
+// nsCOMPtr<nsIRunnable> event =
+// mozilla::NewRunnableMethod<RefPtr<T>, nsTArray<U>>
+// ("description", myObject, &MyClass::HandleEvent, arg1, arg2);
+//
+// Please note that you do not have to pass the same argument type as you
+// specify in the template arguments. For example, if you want to transfer
+// ownership to a runnable, you can write:
+//
+// RefPtr<T> ptr = ...;
+// nsTArray<U> array = ...;
+// nsCOMPtr<nsIRunnable> event =
+// mozilla::NewRunnableMethod<RefPtr<T>, nsTArray<U>>
+// ("description", myObject, &MyClass::DoSomething,
+// std::move(ptr), std::move(array));
+//
+// and there will be no extra AddRef/Release traffic, or copying of the array.
+//
+// Each type that you specify as a template argument to NewRunnableMethod
+// comes with its own style of storage in the runnable and its own style
+// of argument passing to the invoked method. See the comment for
+// ParameterStorage above for more details.
+//
+// If you need to customize the storage type and/or argument passing type,
+// you can write your own class to use as a template argument to
+// NewRunnableMethod. If you find yourself having to do that frequently,
+// please file a bug in Core::XPCOM about adding the custom type to the
+// core code in this file, and/or for custom rules for ParameterStorage
+// to select that strategy.
+//
+// For places that require you to use cancelable runnables, such as
+// workers, there's also NewCancelableRunnableMethod and its non-owning
+// counterpart. The runnables returned by these methods additionally
+// implement nsICancelableRunnable.
+//
+// Finally, all of the functions discussed above have additional overloads
+// that do not take a `const char*` as their first parameter; you may see
+// these in older code. The `const char*` overload is preferred and
+// should be used in new code exclusively.
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::OwningRunnableMethod<PtrType, Method>>
+NewRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod) {
+ return do_AddRef(new detail::OwningRunnableMethodImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::CancelableRunnableMethod<PtrType, Method>>
+NewCancelableRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod) {
+ return do_AddRef(new detail::CancelableRunnableMethodImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::IdleRunnableMethod<PtrType, Method>>
+NewIdleRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod) {
+ return do_AddRef(new detail::IdleRunnableMethodImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::IdleRunnableMethodWithTimer<PtrType, Method>>
+NewIdleRunnableMethodWithTimer(const char* aName, PtrType&& aPtr,
+ Method aMethod) {
+ return do_AddRef(new detail::IdleRunnableMethodWithTimerImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::NonOwningRunnableMethod<PtrType, Method>>
+NewNonOwningRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod) {
+ return do_AddRef(new detail::NonOwningRunnableMethodImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::NonOwningCancelableRunnableMethod<PtrType, Method>>
+NewNonOwningCancelableRunnableMethod(const char* aName, PtrType&& aPtr,
+ Method aMethod) {
+ return do_AddRef(
+ new detail::NonOwningCancelableRunnableMethodImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::NonOwningIdleRunnableMethod<PtrType, Method>>
+NewNonOwningIdleRunnableMethod(const char* aName, PtrType&& aPtr,
+ Method aMethod) {
+ return do_AddRef(new detail::NonOwningIdleRunnableMethodImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+template <typename PtrType, typename Method>
+already_AddRefed<detail::NonOwningIdleRunnableMethodWithTimer<PtrType, Method>>
+NewNonOwningIdleRunnableMethodWithTimer(const char* aName, PtrType&& aPtr,
+ Method aMethod) {
+ return do_AddRef(
+ new detail::NonOwningIdleRunnableMethodWithTimerImpl<PtrType, Method>(
+ aName, std::forward<PtrType>(aPtr), aMethod));
+}
+
+// Similar to NewRunnableMethod. Call like so:
+// nsCOMPtr<nsIRunnable> event =
+// NewRunnableMethod<Types,...>(myObject, &MyClass::HandleEvent, myArg1,...);
+// 'Types' are the stored type for each argument, see ParameterStorage for
+// details.
+template <typename... Storages, typename PtrType, typename Method,
+ typename... Args>
+already_AddRefed<detail::OwningRunnableMethod<PtrType, Method>>
+NewRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod,
+ Args&&... aArgs) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "<Storages...> size should be equal to number of arguments");
+ return do_AddRef(
+ new detail::OwningRunnableMethodImpl<PtrType, Method, Storages...>(
+ aName, std::forward<PtrType>(aPtr), aMethod,
+ std::forward<Args>(aArgs)...));
+}
+
+template <typename... Storages, typename PtrType, typename Method,
+ typename... Args>
+already_AddRefed<detail::NonOwningRunnableMethod<PtrType, Method>>
+NewNonOwningRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod,
+ Args&&... aArgs) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "<Storages...> size should be equal to number of arguments");
+ return do_AddRef(
+ new detail::NonOwningRunnableMethodImpl<PtrType, Method, Storages...>(
+ aName, std::forward<PtrType>(aPtr), aMethod,
+ std::forward<Args>(aArgs)...));
+}
+
+template <typename... Storages, typename PtrType, typename Method,
+ typename... Args>
+already_AddRefed<detail::CancelableRunnableMethod<PtrType, Method>>
+NewCancelableRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod,
+ Args&&... aArgs) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "<Storages...> size should be equal to number of arguments");
+ return do_AddRef(
+ new detail::CancelableRunnableMethodImpl<PtrType, Method, Storages...>(
+ aName, std::forward<PtrType>(aPtr), aMethod,
+ std::forward<Args>(aArgs)...));
+}
+
+template <typename... Storages, typename PtrType, typename Method,
+ typename... Args>
+already_AddRefed<detail::NonOwningCancelableRunnableMethod<PtrType, Method>>
+NewNonOwningCancelableRunnableMethod(const char* aName, PtrType&& aPtr,
+ Method aMethod, Args&&... aArgs) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "<Storages...> size should be equal to number of arguments");
+ return do_AddRef(
+ new detail::NonOwningCancelableRunnableMethodImpl<PtrType, Method,
+ Storages...>(
+ aName, std::forward<PtrType>(aPtr), aMethod,
+ std::forward<Args>(aArgs)...));
+}
+
+template <typename... Storages, typename PtrType, typename Method,
+ typename... Args>
+already_AddRefed<detail::IdleRunnableMethod<PtrType, Method>>
+NewIdleRunnableMethod(const char* aName, PtrType&& aPtr, Method aMethod,
+ Args&&... aArgs) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "<Storages...> size should be equal to number of arguments");
+ return do_AddRef(
+ new detail::IdleRunnableMethodImpl<PtrType, Method, Storages...>(
+ aName, std::forward<PtrType>(aPtr), aMethod,
+ std::forward<Args>(aArgs)...));
+}
+
+template <typename... Storages, typename PtrType, typename Method,
+ typename... Args>
+already_AddRefed<detail::NonOwningIdleRunnableMethod<PtrType, Method>>
+NewNonOwningIdleRunnableMethod(const char* aName, PtrType&& aPtr,
+ Method aMethod, Args&&... aArgs) {
+ static_assert(sizeof...(Storages) == sizeof...(Args),
+ "<Storages...> size should be equal to number of arguments");
+ return do_AddRef(
+ new detail::NonOwningIdleRunnableMethodImpl<PtrType, Method, Storages...>(
+ aName, std::forward<PtrType>(aPtr), aMethod,
+ std::forward<Args>(aArgs)...));
+}
+
+} // namespace mozilla
+
+#endif // XPCOM_GLUE_AVOID_NSPR
+
+// This class is designed to be used when you have an event class E that has a
+// pointer back to resource class R. If R goes away while E is still pending,
+// then it is important to "revoke" E so that it does not try use R after R has
+// been destroyed. nsRevocableEventPtr makes it easy for R to manage such
+// situations:
+//
+// class R;
+//
+// class E : public mozilla::Runnable {
+// public:
+// void Revoke() {
+// mResource = nullptr;
+// }
+// private:
+// R *mResource;
+// };
+//
+// class R {
+// public:
+// void EventHandled() {
+// mEvent.Forget();
+// }
+// private:
+// nsRevocableEventPtr<E> mEvent;
+// };
+//
+// void R::PostEvent() {
+//   // Make sure any pending event is revoked.
+//   mEvent.Revoke();
+//
+//   RefPtr<E> event = new E();
+//   if (NS_SUCCEEDED(NS_DispatchToCurrentThread(event))) {
+//     // Keep a pointer to the event so we can revoke it.
+//     mEvent = std::move(event);
+//   }
+// }
+//
+// NS_IMETHODIMP E::Run() {
+// if (!mResource)
+// return NS_OK;
+// ...
+// mResource->EventHandled();
+// return NS_OK;
+// }
+//
+template <class T>
+class nsRevocableEventPtr {
+ public:
+ nsRevocableEventPtr() : mEvent(nullptr) {}
+ ~nsRevocableEventPtr() { Revoke(); }
+
+ const nsRevocableEventPtr& operator=(RefPtr<T>&& aEvent) {
+ if (mEvent != aEvent) {
+ Revoke();
+ mEvent = std::move(aEvent);
+ }
+ return *this;
+ }
+
+ void Revoke() {
+ if (mEvent) {
+ mEvent->Revoke();
+ mEvent = nullptr;
+ }
+ }
+
+ void Forget() { mEvent = nullptr; }
+ bool IsPending() { return mEvent != nullptr; }
+ T* get() { return mEvent; }
+
+ private:
+ // Not implemented
+ nsRevocableEventPtr(const nsRevocableEventPtr&);
+ nsRevocableEventPtr& operator=(const nsRevocableEventPtr&);
+
+ RefPtr<T> mEvent;
+};
+
+template <class T>
+inline already_AddRefed<T> do_AddRef(nsRevocableEventPtr<T>& aObj) {
+ return do_AddRef(aObj.get());
+}
+
+/**
+ * A simple helper to suffix thread pool names with incremental numbers.
+ */
+class nsThreadPoolNaming {
+ public:
+ nsThreadPoolNaming() = default;
+
+ /**
+ * Returns a thread name as "<aPoolName> #<n>" and increments the counter.
+ */
+ nsCString GetNextThreadName(const nsACString& aPoolName);
+
+ template <size_t LEN>
+ nsCString GetNextThreadName(const char (&aPoolName)[LEN]) {
+ return GetNextThreadName(nsDependentCString(aPoolName, LEN - 1));
+ }
+
+ private:
+ mozilla::Atomic<uint32_t> mCounter{0};
+
+ nsThreadPoolNaming(const nsThreadPoolNaming&) = delete;
+ void operator=(const nsThreadPoolNaming&) = delete;
+};
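+
+// A minimal usage sketch (the pool name is illustrative): a pool keeps one
+// nsThreadPoolNaming member and asks it for each new thread's name:
+//
+//   nsThreadPoolNaming mNaming;
+//   nsCString name = mNaming.GetNextThreadName("MyPool");
+//   // Produces names of the form "MyPool #<n>" with an increasing <n>.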
+
+/**
+ * Thread priority in most operating systems affects scheduling, not IO. This
+ * helper is used to set the current thread to low IO priority for the
+ * lifetime of the created object. The low IO priority setting applies only
+ * to the current thread.
+ */
+class MOZ_STACK_CLASS nsAutoLowPriorityIO {
+ public:
+ nsAutoLowPriorityIO();
+ ~nsAutoLowPriorityIO();
+
+ private:
+ bool lowIOPrioritySet;
+#if defined(XP_MACOSX)
+ int oldPriority;
+#endif
+};
+
+void NS_SetMainThread();
+
+// Used only on cooperatively scheduled "main" threads. Causes the thread to be
+// considered a main thread and also causes GetCurrentVirtualThread to return
+// aVirtualThread.
+void NS_SetMainThread(PRThread* aVirtualThread);
+
+// Used only on cooperatively scheduled "main" threads. Causes the thread to no
+// longer be considered a main thread. Also causes GetCurrentVirtualThread() to
+// return a unique value.
+void NS_UnsetMainThread();
+
+/**
+ * Return the expiration time of the next timer to run on the current
+ * thread. If that expiration time is greater than aDefault, then
+ * return aDefault. aSearchBound specifies a maximum number of timers
+ * to examine to find a timer on the current thread. If no timer that
+ * will run on the current thread is found after examining
+ * aSearchBound timers, return the highest seen expiration time as a
+ * best effort guess.
+ *
+ * Timers of type nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY or
+ * nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY will be skipped when
+ * searching for the next expiration time. This enables timers to
+ * have lower priority than callbacks dispatched from
+ * nsIThread::IdleDispatch.
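+ *
+ * A minimal usage sketch (the default deadline and search bound are
+ * illustrative values, not recommendations):
+ *
+ *   mozilla::TimeStamp deadline = NS_GetTimerDeadlineHintOnCurrentThread(
+ *       mozilla::TimeStamp::Now() + mozilla::TimeDuration::FromMilliseconds(16),
+ *       25);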
+ */
+extern mozilla::TimeStamp NS_GetTimerDeadlineHintOnCurrentThread(
+ mozilla::TimeStamp aDefault, uint32_t aSearchBound);
+
+/**
+ * Dispatches the given event to a background thread. The primary benefit of
+ * this API is that you do not have to manage the lifetime of your own thread
+ * for running your own events; the thread manager will take care of the
+ * background thread's lifetime. Not having to manage your own thread also
+ * means less resource usage, as the underlying implementation here can manage
+ * spinning up and shutting down threads appropriately.
+ *
+ * NOTE: there is no guarantee that events dispatched via these APIs are run
+ * serially, in dispatch order; several dispatched events may run in parallel.
+ * If you depend on serial execution of dispatched events, you should use
+ * NS_CreateBackgroundTaskQueue instead, and dispatch events to the returned
+ * event target.
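+ *
+ * A minimal dispatch sketch (the task name is illustrative):
+ *
+ *   NS_DispatchBackgroundTask(
+ *       NS_NewRunnableFunction("MyModule::DoWork", [] { ... }),
+ *       NS_DISPATCH_NORMAL);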
+ */
+extern nsresult NS_DispatchBackgroundTask(
+ already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aDispatchFlags = NS_DISPATCH_NORMAL);
+extern "C" nsresult NS_DispatchBackgroundTask(
+ nsIRunnable* aEvent, uint32_t aDispatchFlags = NS_DISPATCH_NORMAL);
+
+/**
+ * Obtain a new serial event target that dispatches runnables to a background
+ * thread. In many cases, this is a straight replacement for, and generally
+ * preferable to, creating your own private thread.
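+ *
+ * A minimal usage sketch (names are illustrative):
+ *
+ *   nsCOMPtr<nsISerialEventTarget> queue;
+ *   if (NS_SUCCEEDED(NS_CreateBackgroundTaskQueue("MyQueue",
+ *                                                 getter_AddRefs(queue)))) {
+ *     queue->Dispatch(NS_NewRunnableFunction("MyTask", [] { ... }),
+ *                     NS_DISPATCH_NORMAL);
+ *   }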
+ */
+extern "C" nsresult NS_CreateBackgroundTaskQueue(
+ const char* aName, nsISerialEventTarget** aTarget);
+
+/**
+ * Dispatch the given runnable to the given event target, spinning the current
+ * thread's event loop until the runnable has finished executing.
+ *
+ * This is roughly equivalent to the previously-supported `NS_DISPATCH_SYNC`
+ * flag.
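+ *
+ * A minimal usage sketch (the reason string, task name, and target are
+ * illustrative):
+ *
+ *   NS_DispatchAndSpinEventLoopUntilComplete(
+ *       "MyModule::FlushPendingWork"_ns, targetThread,
+ *       NS_NewRunnableFunction("FlushPendingWork", [] { ... }));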
+ */
+extern nsresult NS_DispatchAndSpinEventLoopUntilComplete(
+ const nsACString& aVeryGoodReasonToDoThis, nsIEventTarget* aEventTarget,
+ already_AddRefed<nsIRunnable> aEvent);
+
+// Predeclaration for logging function below
+namespace IPC {
+class Message;
+class MessageReader;
+class MessageWriter;
+} // namespace IPC
+
+class nsTimerImpl;
+
+namespace mozilla {
+
+// RAII class that sets the TLS entry to the currently running
+// nsISerialEventTarget, so that GetCurrentSerialEventTarget() reports it.
+// It should be used by inner event loop implementations.
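+//
+// A minimal sketch of the intended pattern (names are illustrative):
+//
+//   {
+//     SerialEventTargetGuard guard(queue);  // TLS now reports `queue`
+//     event->Run();  // code here sees `queue` via
+//                    // GetCurrentSerialEventTarget()
+//   }  // destructor restores the previous TLS value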
+class SerialEventTargetGuard {
+ public:
+ explicit SerialEventTargetGuard(nsISerialEventTarget* aThread)
+ : mLastCurrentThread(sCurrentThreadTLS.get()) {
+ Set(aThread);
+ }
+
+ ~SerialEventTargetGuard() { sCurrentThreadTLS.set(mLastCurrentThread); }
+
+ static void InitTLS();
+ static nsISerialEventTarget* GetCurrentSerialEventTarget() {
+ return sCurrentThreadTLS.get();
+ }
+
+ protected:
+ friend class ::MessageLoop;
+ static void Set(nsISerialEventTarget* aThread) {
+ MOZ_ASSERT(aThread->IsOnCurrentThread());
+ sCurrentThreadTLS.set(aThread);
+ }
+
+ private:
+ static MOZ_THREAD_LOCAL(nsISerialEventTarget*) sCurrentThreadTLS;
+ nsISerialEventTarget* mLastCurrentThread;
+};
+
+// Get the serial event target corresponding to the currently executing task
+// queue or thread. This method will assert if called on a thread pool without
+// an active task queue.
+//
+// This function should generally be preferred over NS_GetCurrentThread since it
+// will return a more useful answer when called from a task queue running on a
+// thread pool or on a non-xpcom thread which accepts runnable dispatches.
+//
+// NOTE: The returned nsISerialEventTarget may not accept runnable dispatches
+// (e.g. if it corresponds to a non-xpcom thread), however it may still be used
+// to check if you're on the given thread/queue using IsOnCurrentThread().
+
+nsISerialEventTarget* GetCurrentSerialEventTarget();
+
+// Get a weak reference to a serial event target which can be used to dispatch
+// runnables to the main thread.
+//
+// NOTE: While this is currently a weak pointer to the nsIThread* returned from
+// NS_GetMainThread(), this may change in the future.
+
+nsISerialEventTarget* GetMainThreadSerialEventTarget();
+
+// Returns the number of CPUs, like PR_GetNumberOfProcessors, except
+// that it can return a cached value on platforms where sandboxing
+// would prevent reading the current value (currently Linux). CPU
+// hotplugging is uncommon, so this is unlikely to make a difference
+// in practice.
+size_t GetNumberOfProcessors();
+
+/**
+ * A helper class to log task dispatch and execution with "MOZ_LOG=events:1".
+ * The output is machine-readable and creates a link between dispatch and run.
+ *
+ * Usage example for the concrete template type nsIRunnable.
+ * To log a dispatch, which means putting an event into a queue:
+ * LogRunnable::LogDispatch(event);
+ * theQueue.putEvent(event);
+ *
+ * To log execution (running) of the event:
+ * nsCOMPtr<nsIRunnable> event = theQueue.popEvent();
+ * {
+ * LogRunnable::Run log(event);
+ * event->Run();
+ *     event = nullptr;  // to include the destructor code in the span
+ * }
+ *
+ * The class is a template so that we can support various specific super-types
+ * of tasks in the future. We can't use void* because it may cast differently
+ * and tracking the pointer in logs would then be impossible.
+ */
+template <typename T>
+class LogTaskBase {
+ public:
+ LogTaskBase() = delete;
+
+ // Adds a simple log about dispatch of this runnable.
+ static void LogDispatch(T* aEvent);
+  // The `aContext` pointer adds another unique identifier, nothing more.
+ static void LogDispatch(T* aEvent, void* aContext);
+
+  // Logs dispatch of the message along with the PID of the target process,
+  // so that IPC messages can be uniquely identified.
+ static void LogDispatchWithPid(T* aEvent, int32_t aPid);
+
+ // This is designed to surround a call to `Run()` or any code representing
+ // execution of the task body.
+  // The constructor adds a simple log about the start of the runnable's
+  // execution and the destructor adds a log about its end.
+ class MOZ_RAII Run {
+ public:
+ Run() = delete;
+ explicit Run(T* aEvent, bool aWillRunAgain = false);
+ explicit Run(T* aEvent, void* aContext, bool aWillRunAgain = false);
+ ~Run();
+
+    // When this is called, the log emitted by this RAII dtor will only say
+    // "interrupted", expecting that the event will run again.
+ void WillRunAgain() { mWillRunAgain = true; }
+
+ private:
+ bool mWillRunAgain = false;
+ };
+};
+
+class MicroTaskRunnable;
+class Task; // TaskController
+class PresShell;
+namespace dom {
+class FrameRequestCallback;
+} // namespace dom
+
+// Specialized methods must be explicitly predeclared.
+template <>
+LogTaskBase<nsIRunnable>::Run::Run(nsIRunnable* aEvent, bool aWillRunAgain);
+template <>
+LogTaskBase<Task>::Run::Run(Task* aTask, bool aWillRunAgain);
+template <>
+void LogTaskBase<IPC::Message>::LogDispatchWithPid(IPC::Message* aEvent,
+ int32_t aPid);
+template <>
+LogTaskBase<IPC::Message>::Run::Run(IPC::Message* aMessage, bool aWillRunAgain);
+template <>
+LogTaskBase<nsTimerImpl>::Run::Run(nsTimerImpl* aEvent, bool aWillRunAgain);
+
+typedef LogTaskBase<nsIRunnable> LogRunnable;
+typedef LogTaskBase<MicroTaskRunnable> LogMicroTaskRunnable;
+typedef LogTaskBase<IPC::Message> LogIPCMessage;
+typedef LogTaskBase<nsTimerImpl> LogTimerEvent;
+typedef LogTaskBase<Task> LogTask;
+typedef LogTaskBase<PresShell> LogPresShellObserver;
+typedef LogTaskBase<dom::FrameRequestCallback> LogFrameRequestCallback;
+// If you add new types don't forget to add:
+// `template class LogTaskBase<YourType>;` to nsThreadUtils.cpp
+
+} // namespace mozilla
+
+#endif // nsThreadUtils_h__
diff --git a/xpcom/threads/nsTimerImpl.cpp b/xpcom/threads/nsTimerImpl.cpp
new file mode 100644
index 0000000000..d143eebe86
--- /dev/null
+++ b/xpcom/threads/nsTimerImpl.cpp
@@ -0,0 +1,820 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsTimerImpl.h"
+
+#include <utility>
+
+#include "TimerThread.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Logging.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/ProfilerLabels.h"
+#include "mozilla/ResultExtensions.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/StaticMutex.h"
+#include "nsThreadManager.h"
+#include "nsThreadUtils.h"
+#include "pratom.h"
+
+#ifdef XP_WIN
+# include <process.h>
+# ifndef getpid
+# define getpid _getpid
+# endif
+#else
+# include <unistd.h>
+#endif
+
+using mozilla::Atomic;
+using mozilla::LogLevel;
+using mozilla::MakeRefPtr;
+using mozilla::MutexAutoLock;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+// Holds the timer thread and manages all interactions with it
+// under a locked mutex. This wrapper is not destroyed until after
+// nsThreadManager shutdown, to ensure we don't hit a use-after-free during an
+// off-thread access to the timer thread.
+class TimerThreadWrapper {
+ public:
+  constexpr TimerThreadWrapper() : mThread(nullptr) {}
+ ~TimerThreadWrapper() = default;
+
+ nsresult Init();
+ void Shutdown();
+
+ nsresult AddTimer(nsTimerImpl* aTimer, const MutexAutoLock& aProofOfLock)
+ MOZ_REQUIRES(aTimer->mMutex);
+ nsresult RemoveTimer(nsTimerImpl* aTimer, const MutexAutoLock& aProofOfLock)
+ MOZ_REQUIRES(aTimer->mMutex);
+ TimeStamp FindNextFireTimeForCurrentThread(TimeStamp aDefault,
+ uint32_t aSearchBound);
+ uint32_t AllowedEarlyFiringMicroseconds();
+ nsresult GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal);
+
+ private:
+ static mozilla::StaticMutex sMutex;
+ TimerThread* mThread MOZ_GUARDED_BY(sMutex);
+};
+
+mozilla::StaticMutex TimerThreadWrapper::sMutex;
+
+nsresult TimerThreadWrapper::Init() {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ mThread = new TimerThread();
+
+ NS_ADDREF(mThread);
+
+ return NS_OK;
+}
+
+void TimerThreadWrapper::Shutdown() {
+ RefPtr<TimerThread> thread;
+
+ {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ if (!mThread) {
+ return;
+ }
+ thread = mThread;
+ }
+ // Shutdown calls |nsTimerImpl::Cancel| which needs to make a call into
+ // |RemoveTimer|. This can't be done under the lock.
+ thread->Shutdown();
+
+ {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ NS_RELEASE(mThread);
+ }
+}
+
+nsresult TimerThreadWrapper::AddTimer(nsTimerImpl* aTimer,
+ const MutexAutoLock& aProofOfLock) {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ if (mThread) {
+ return mThread->AddTimer(aTimer, aProofOfLock);
+ }
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+nsresult TimerThreadWrapper::RemoveTimer(nsTimerImpl* aTimer,
+ const MutexAutoLock& aProofOfLock) {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ if (mThread) {
+ return mThread->RemoveTimer(aTimer, aProofOfLock);
+ }
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+TimeStamp TimerThreadWrapper::FindNextFireTimeForCurrentThread(
+ TimeStamp aDefault, uint32_t aSearchBound) {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ return mThread
+ ? mThread->FindNextFireTimeForCurrentThread(aDefault, aSearchBound)
+ : TimeStamp();
+}
+
+uint32_t TimerThreadWrapper::AllowedEarlyFiringMicroseconds() {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ return mThread ? mThread->AllowedEarlyFiringMicroseconds() : 0;
+}
+
+nsresult TimerThreadWrapper::GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal) {
+ RefPtr<TimerThread> thread;
+ {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ if (!mThread) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ thread = mThread;
+ }
+ return thread->GetTimers(aRetVal);
+}
+
+static TimerThreadWrapper gThreadWrapper;
+
+// This module prints info about the precision of timers.
+static mozilla::LazyLogModule sTimerLog("nsTimerImpl");
+
+mozilla::LogModule* GetTimerLog() { return sTimerLog; }
+
+TimeStamp NS_GetTimerDeadlineHintOnCurrentThread(TimeStamp aDefault,
+ uint32_t aSearchBound) {
+ return gThreadWrapper.FindNextFireTimeForCurrentThread(aDefault,
+ aSearchBound);
+}
+
+already_AddRefed<nsITimer> NS_NewTimer() { return NS_NewTimer(nullptr); }
+
+already_AddRefed<nsITimer> NS_NewTimer(nsIEventTarget* aTarget) {
+ return nsTimer::WithEventTarget(aTarget).forget();
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithObserver(
+ nsIObserver* aObserver, uint32_t aDelay, uint32_t aType,
+ nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithObserver(getter_AddRefs(timer), aObserver, aDelay,
+ aType, aTarget));
+ return std::move(timer);
+}
+nsresult NS_NewTimerWithObserver(nsITimer** aTimer, nsIObserver* aObserver,
+ uint32_t aDelay, uint32_t aType,
+ nsIEventTarget* aTarget) {
+ auto timer = nsTimer::WithEventTarget(aTarget);
+
+ MOZ_TRY(timer->Init(aObserver, aDelay, aType));
+ timer.forget(aTimer);
+ return NS_OK;
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithCallback(
+ nsITimerCallback* aCallback, uint32_t aDelay, uint32_t aType,
+ nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithCallback(getter_AddRefs(timer), aCallback, aDelay,
+ aType, aTarget));
+ return std::move(timer);
+}
+nsresult NS_NewTimerWithCallback(nsITimer** aTimer, nsITimerCallback* aCallback,
+ uint32_t aDelay, uint32_t aType,
+ nsIEventTarget* aTarget) {
+ auto timer = nsTimer::WithEventTarget(aTarget);
+
+ MOZ_TRY(timer->InitWithCallback(aCallback, aDelay, aType));
+ timer.forget(aTimer);
+ return NS_OK;
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithCallback(
+ nsITimerCallback* aCallback, const TimeDuration& aDelay, uint32_t aType,
+ nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithCallback(getter_AddRefs(timer), aCallback, aDelay,
+ aType, aTarget));
+ return std::move(timer);
+}
+nsresult NS_NewTimerWithCallback(nsITimer** aTimer, nsITimerCallback* aCallback,
+ const TimeDuration& aDelay, uint32_t aType,
+ nsIEventTarget* aTarget) {
+ auto timer = nsTimer::WithEventTarget(aTarget);
+
+ MOZ_TRY(timer->InitHighResolutionWithCallback(aCallback, aDelay, aType));
+ timer.forget(aTimer);
+ return NS_OK;
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithCallback(
+ std::function<void(nsITimer*)>&& aCallback, uint32_t aDelay, uint32_t aType,
+ const char* aNameString, nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithCallback(getter_AddRefs(timer), std::move(aCallback),
+ aDelay, aType, aNameString, aTarget));
+ return timer;
+}
+nsresult NS_NewTimerWithCallback(nsITimer** aTimer,
+ std::function<void(nsITimer*)>&& aCallback,
+ uint32_t aDelay, uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget) {
+ return NS_NewTimerWithCallback(aTimer, std::move(aCallback),
+ TimeDuration::FromMilliseconds(aDelay), aType,
+ aNameString, aTarget);
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithCallback(
+ std::function<void(nsITimer*)>&& aCallback, const TimeDuration& aDelay,
+ uint32_t aType, const char* aNameString, nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithCallback(getter_AddRefs(timer), std::move(aCallback),
+ aDelay, aType, aNameString, aTarget));
+ return timer;
+}
+nsresult NS_NewTimerWithCallback(nsITimer** aTimer,
+ std::function<void(nsITimer*)>&& aCallback,
+ const TimeDuration& aDelay, uint32_t aType,
+ const char* aNameString,
+ nsIEventTarget* aTarget) {
+ RefPtr<nsTimer> timer = nsTimer::WithEventTarget(aTarget);
+
+ MOZ_TRY(timer->InitWithClosureCallback(std::move(aCallback), aDelay, aType,
+ aNameString));
+ timer.forget(aTimer);
+ return NS_OK;
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithFuncCallback(
+ nsTimerCallbackFunc aCallback, void* aClosure, uint32_t aDelay,
+ uint32_t aType, const char* aNameString, nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithFuncCallback(getter_AddRefs(timer), aCallback,
+ aClosure, aDelay, aType, aNameString,
+ aTarget));
+ return std::move(timer);
+}
+nsresult NS_NewTimerWithFuncCallback(nsITimer** aTimer,
+ nsTimerCallbackFunc aCallback,
+ void* aClosure, uint32_t aDelay,
+ uint32_t aType, const char* aNameString,
+ nsIEventTarget* aTarget) {
+ auto timer = nsTimer::WithEventTarget(aTarget);
+
+ MOZ_TRY(timer->InitWithNamedFuncCallback(aCallback, aClosure, aDelay, aType,
+ aNameString));
+ timer.forget(aTimer);
+ return NS_OK;
+}
+
+mozilla::Result<nsCOMPtr<nsITimer>, nsresult> NS_NewTimerWithFuncCallback(
+ nsTimerCallbackFunc aCallback, void* aClosure, const TimeDuration& aDelay,
+ uint32_t aType, const char* aNameString, nsIEventTarget* aTarget) {
+ nsCOMPtr<nsITimer> timer;
+ MOZ_TRY(NS_NewTimerWithFuncCallback(getter_AddRefs(timer), aCallback,
+ aClosure, aDelay, aType, aNameString,
+ aTarget));
+ return std::move(timer);
+}
+nsresult NS_NewTimerWithFuncCallback(nsITimer** aTimer,
+ nsTimerCallbackFunc aCallback,
+ void* aClosure, const TimeDuration& aDelay,
+ uint32_t aType, const char* aNameString,
+ nsIEventTarget* aTarget) {
+ auto timer = nsTimer::WithEventTarget(aTarget);
+
+ MOZ_TRY(timer->InitHighResolutionWithNamedFuncCallback(
+ aCallback, aClosure, aDelay, aType, aNameString));
+ timer.forget(aTimer);
+ return NS_OK;
+}
+
+// This module prints info about which timers are firing, which is useful for
+// wakeups for the purposes of power profiling. Set the following environment
+// variable before starting the browser.
+//
+// MOZ_LOG=TimerFirings:4
+//
+// Then a line will be printed for every timer that fires.
+//
+// If you redirect this output to a file called "out", you can then
+// post-process it with a command something like the following.
+//
+// cat out | grep timer | sort | uniq -c | sort -r -n
+//
+// This will show how often each unique line appears, with the most common ones
+// first.
+//
+// More detailed docs are here:
+// https://developer.mozilla.org/en-US/docs/Mozilla/Performance/TimerFirings_logging
+//
+static mozilla::LazyLogModule sTimerFiringsLog("TimerFirings");
+
+static mozilla::LogModule* GetTimerFiringsLog() { return sTimerFiringsLog; }
+
+#include <math.h>
+
+/* static */
+mozilla::StaticMutex nsTimerImpl::sDeltaMutex;
+/* static */
+double nsTimerImpl::sDeltaSumSquared MOZ_GUARDED_BY(nsTimerImpl::sDeltaMutex) =
+ 0;
+/* static */
+double nsTimerImpl::sDeltaSum MOZ_GUARDED_BY(nsTimerImpl::sDeltaMutex) = 0;
+/* static */
+double nsTimerImpl::sDeltaNum MOZ_GUARDED_BY(nsTimerImpl::sDeltaMutex) = 0;
+
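+// Computes the sample mean and standard deviation from running sums, using the
+// one-pass identity var = (n * sum(x^2) - sum(x)^2) / (n * (n - 1)). The
+// guards below clamp intermediate values that go negative due to
+// floating-point rounding.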
+static void myNS_MeanAndStdDev(double n, double sumOfValues,
+ double sumOfSquaredValues, double* meanResult,
+ double* stdDevResult) {
+ double mean = 0.0, var = 0.0, stdDev = 0.0;
+ if (n > 0.0 && sumOfValues >= 0) {
+ mean = sumOfValues / n;
+ double temp = (n * sumOfSquaredValues) - (sumOfValues * sumOfValues);
+ if (temp < 0.0 || n <= 1) {
+ var = 0.0;
+ } else {
+ var = temp / (n * (n - 1));
+ }
+ // for some reason, Windows says sqrt(0.0) is "-1.#J" (?!) so do this:
+ stdDev = var != 0.0 ? sqrt(var) : 0.0;
+ }
+ *meanResult = mean;
+ *stdDevResult = stdDev;
+}
+
+NS_IMPL_QUERY_INTERFACE(nsTimer, nsITimer)
+NS_IMPL_ADDREF(nsTimer)
+
+NS_IMPL_ISUPPORTS(nsTimerManager, nsITimerManager)
+
+NS_IMETHODIMP nsTimerManager::GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal) {
+ return gThreadWrapper.GetTimers(aRetVal);
+}
+
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsTimer::Release(void) {
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "nsTimer");
+
+ if (count == 1) {
+ // Last ref, in nsTimerImpl::mITimer. Make sure the cycle is broken.
+ mImpl->CancelImpl(true);
+ } else if (count == 0) {
+ delete this;
+ }
+
+ return count;
+}
+
+nsTimerImpl::nsTimerImpl(nsITimer* aTimer, nsIEventTarget* aTarget)
+ : mEventTarget(aTarget),
+ mIsInTimerThread(false),
+ mType(0),
+ mGeneration(0),
+ mITimer(aTimer),
+ mMutex("nsTimerImpl::mMutex"),
+ mCallback(UnknownCallback{}),
+ mFiring(0) {
+ // XXX some code creates timers during xpcom shutdown, when threads are no
+ // longer available, so we cannot turn this on yet.
+ // MOZ_ASSERT(mEventTarget);
+}
+
+// static
+nsresult nsTimerImpl::Startup() { return gThreadWrapper.Init(); }
+
+void nsTimerImpl::Shutdown() {
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ mozilla::StaticMutexAutoLock lock(sDeltaMutex);
+ double mean = 0, stddev = 0;
+ myNS_MeanAndStdDev(sDeltaNum, sDeltaSum, sDeltaSumSquared, &mean, &stddev);
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("sDeltaNum = %f, sDeltaSum = %f, sDeltaSumSquared = %f\n",
+ sDeltaNum, sDeltaSum, sDeltaSumSquared));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("mean: %fms, stddev: %fms\n", mean, stddev));
+ }
+
+ gThreadWrapper.Shutdown();
+}
+
+nsresult nsTimerImpl::InitCommon(const TimeDuration& aDelay, uint32_t aType,
+ Callback&& newCallback,
+ const MutexAutoLock& aProofOfLock) {
+ if (!mEventTarget) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ gThreadWrapper.RemoveTimer(this, aProofOfLock);
+
+ // If we have an existing callback, using `swap` ensures it's destroyed after
+ // the mutex is unlocked in our caller.
+ std::swap(mCallback, newCallback);
+ ++mGeneration;
+
+ mType = (uint8_t)aType;
+ mDelay = aDelay;
+ mTimeout = TimeStamp::Now() + mDelay;
+
+ return gThreadWrapper.AddTimer(this, aProofOfLock);
+}
+
+nsresult nsTimerImpl::InitWithNamedFuncCallback(nsTimerCallbackFunc aFunc,
+ void* aClosure, uint32_t aDelay,
+ uint32_t aType,
+ const char* aName) {
+ return InitHighResolutionWithNamedFuncCallback(
+ aFunc, aClosure, TimeDuration::FromMilliseconds(aDelay), aType, aName);
+}
+
+nsresult nsTimerImpl::InitHighResolutionWithNamedFuncCallback(
+ nsTimerCallbackFunc aFunc, void* aClosure, const TimeDuration& aDelay,
+ uint32_t aType, const char* aName) {
+ if (NS_WARN_IF(!aFunc)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ Callback cb{FuncCallback{aFunc, aClosure, aName}};
+
+ MutexAutoLock lock(mMutex);
+ return InitCommon(aDelay, aType, std::move(cb), lock);
+}
+
+nsresult nsTimerImpl::InitWithCallback(nsITimerCallback* aCallback,
+ uint32_t aDelayInMs, uint32_t aType) {
+ return InitHighResolutionWithCallback(
+ aCallback, TimeDuration::FromMilliseconds(aDelayInMs), aType);
+}
+
+nsresult nsTimerImpl::InitHighResolutionWithCallback(
+ nsITimerCallback* aCallback, const TimeDuration& aDelay, uint32_t aType) {
+ if (NS_WARN_IF(!aCallback)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+  // Goes out of scope after the unlock, preventing deadlock.
+ Callback cb{nsCOMPtr{aCallback}};
+
+ MutexAutoLock lock(mMutex);
+ return InitCommon(aDelay, aType, std::move(cb), lock);
+}
+
+nsresult nsTimerImpl::Init(nsIObserver* aObserver, uint32_t aDelayInMs,
+ uint32_t aType) {
+ if (NS_WARN_IF(!aObserver)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ Callback cb{nsCOMPtr{aObserver}};
+
+ MutexAutoLock lock(mMutex);
+ return InitCommon(TimeDuration::FromMilliseconds(aDelayInMs), aType,
+ std::move(cb), lock);
+}
+
+nsresult nsTimerImpl::InitWithClosureCallback(
+ std::function<void(nsITimer*)>&& aCallback, const TimeDuration& aDelay,
+ uint32_t aType, const char* aNameString) {
+ if (NS_WARN_IF(!aCallback)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ Callback cb{ClosureCallback{std::move(aCallback), aNameString}};
+
+ MutexAutoLock lock(mMutex);
+ return InitCommon(aDelay, aType, std::move(cb), lock);
+}
+
+nsresult nsTimerImpl::Cancel() {
+ CancelImpl(false);
+ return NS_OK;
+}
+
+void nsTimerImpl::CancelImpl(bool aClearITimer) {
+ Callback cbTrash{UnknownCallback{}};
+ RefPtr<nsITimer> timerTrash;
+
+ {
+ MutexAutoLock lock(mMutex);
+ gThreadWrapper.RemoveTimer(this, lock);
+
+ // The swap ensures our callback isn't dropped until after the mutex is
+ // unlocked.
+ std::swap(cbTrash, mCallback);
+ ++mGeneration;
+
+ // Don't clear this if we're firing; once Fire returns, we'll get this call
+ // again.
+ if (aClearITimer && !mFiring) {
+ MOZ_RELEASE_ASSERT(
+ mITimer,
+ "mITimer was nulled already! "
+ "This indicates that someone has messed up the refcount on nsTimer!");
+ timerTrash.swap(mITimer);
+ }
+ }
+}
+
+nsresult nsTimerImpl::SetDelay(uint32_t aDelay) {
+ MutexAutoLock lock(mMutex);
+ if (GetCallback().is<UnknownCallback>() && !IsRepeating()) {
+ // This may happen if someone tries to re-use a one-shot timer
+ // by re-setting delay instead of reinitializing the timer.
+ NS_ERROR(
+ "nsITimer->SetDelay() called when the "
+ "one-shot timer is not set up.");
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+  bool reAdd = NS_SUCCEEDED(gThreadWrapper.RemoveTimer(this, lock));
+
+ mDelay = TimeDuration::FromMilliseconds(aDelay);
+ mTimeout = TimeStamp::Now() + mDelay;
+
+ if (reAdd) {
+ gThreadWrapper.AddTimer(this, lock);
+ }
+
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::GetDelay(uint32_t* aDelay) {
+ MutexAutoLock lock(mMutex);
+ *aDelay = mDelay.ToMilliseconds();
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::SetType(uint32_t aType) {
+ MutexAutoLock lock(mMutex);
+ mType = (uint8_t)aType;
+  // XXX If this is called, we should change the actual type... this could
+  // affect repeating timers. We need to ensure in Fire() that if mType has
+  // changed during the callback, we don't end up with the timer in the queue
+  // twice.
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::GetType(uint32_t* aType) {
+ MutexAutoLock lock(mMutex);
+ *aType = mType;
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::GetClosure(void** aClosure) {
+ MutexAutoLock lock(mMutex);
+ if (GetCallback().is<FuncCallback>()) {
+ *aClosure = GetCallback().as<FuncCallback>().mClosure;
+ } else {
+ *aClosure = nullptr;
+ }
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::GetCallback(nsITimerCallback** aCallback) {
+ MutexAutoLock lock(mMutex);
+ if (GetCallback().is<InterfaceCallback>()) {
+ NS_IF_ADDREF(*aCallback = GetCallback().as<InterfaceCallback>());
+ } else {
+ *aCallback = nullptr;
+ }
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::GetTarget(nsIEventTarget** aTarget) {
+ MutexAutoLock lock(mMutex);
+ NS_IF_ADDREF(*aTarget = mEventTarget);
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::SetTarget(nsIEventTarget* aTarget) {
+ MutexAutoLock lock(mMutex);
+ if (NS_WARN_IF(!mCallback.is<UnknownCallback>())) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ if (aTarget) {
+ mEventTarget = aTarget;
+ } else {
+ mEventTarget = mozilla::GetCurrentSerialEventTarget();
+ }
+ return NS_OK;
+}
+
+nsresult nsTimerImpl::GetAllowedEarlyFiringMicroseconds(uint32_t* aValueOut) {
+ *aValueOut = gThreadWrapper.AllowedEarlyFiringMicroseconds();
+ return NS_OK;
+}
+
+void nsTimerImpl::Fire(int32_t aGeneration) {
+ uint8_t oldType;
+ uint32_t oldDelay;
+ TimeStamp oldTimeout;
+ Callback callbackDuringFire{UnknownCallback{}};
+ nsCOMPtr<nsITimer> timer;
+
+ {
+ // Don't fire callbacks or fiddle with refcounts when the mutex is locked.
+ // If some other thread Cancels/Inits after this, they're just too late.
+ MutexAutoLock lock(mMutex);
+ if (aGeneration != mGeneration) {
+      // This timer got rescheduled or canceled before we fired, so ignore
+      // this firing.
+ return;
+ }
+
+ // We modify mTimeout, so we must not be in the current TimerThread's
+ // mTimers list.
+ MOZ_ASSERT(!mIsInTimerThread);
+
+ ++mFiring;
+ callbackDuringFire = mCallback;
+ oldType = mType;
+ oldDelay = mDelay.ToMilliseconds();
+ oldTimeout = mTimeout;
+    // Ensure that the nsITimer does not unhook from the nsTimerImpl during
+    // Fire; otherwise there would be null pointer crashes if the user of the
+    // timer drops its reference and then uses the nsITimer* passed in the
+    // callback.
+ timer = mITimer;
+ }
+
+ AUTO_PROFILER_LABEL("nsTimerImpl::Fire", OTHER);
+
+ TimeStamp fireTime = TimeStamp::Now();
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ TimeDuration delta = fireTime - oldTimeout;
+ int32_t d = delta.ToMilliseconds(); // delta in ms
+ {
+ mozilla::StaticMutexAutoLock lock(sDeltaMutex);
+ sDeltaSum += abs(d);
+ sDeltaSumSquared += double(d) * double(d);
+ sDeltaNum++;
+ }
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] expected delay time %4ums\n", this, oldDelay));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] actual delay time %4dms\n", this, oldDelay + d));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] (mType is %d) -------\n", this, oldType));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] delta %4dms\n", this, d));
+ }
+
+ if (MOZ_LOG_TEST(GetTimerFiringsLog(), LogLevel::Debug)) {
+ LogFiring(callbackDuringFire, oldType, oldDelay);
+ }
+
+ callbackDuringFire.match(
+ [](const UnknownCallback&) {},
+ [&](const InterfaceCallback& i) { i->Notify(timer); },
+ [&](const ObserverCallback& o) {
+ o->Observe(timer, NS_TIMER_CALLBACK_TOPIC, nullptr);
+ },
+ [&](const FuncCallback& f) { f.mFunc(timer, f.mClosure); },
+ [&](const ClosureCallback& c) { c.mFunc(timer); });
+
+ TimeStamp now = TimeStamp::Now();
+
+ MutexAutoLock lock(mMutex);
+ if (aGeneration == mGeneration) {
+ if (IsRepeating()) {
+ // Repeating timer has not been re-init or canceled; reschedule
+ if (IsSlack()) {
+ mTimeout = now + mDelay;
+ } else {
+ if (mDelay) {
+ // If we are late enough finishing the callback that we have missed
+ // some firings, do not attempt to play catchup, just get back on the
+ // cadence we're supposed to maintain.
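+            // (E.g. with mDelay = 10ms and `now` 25ms past mTimeout,
+            // missedFirings is 2 and the next firing lands 30ms after the
+            // old deadline, i.e. 5ms from now, back on cadence.)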
+ unsigned missedFirings =
+ static_cast<unsigned>((now - mTimeout) / mDelay);
+ mTimeout += mDelay * (missedFirings + 1);
+ } else {
+ // Can we stop allowing repeating timers with delay 0?
+ mTimeout = now;
+ }
+ }
+ gThreadWrapper.AddTimer(this, lock);
+ } else {
+ // Non-repeating timer that has not been re-scheduled. Clear.
+ // XXX(nika): Other callsites seem to go to some effort to avoid
+ // destroying mCallback when it's held. Why not this one?
+ mCallback = mozilla::AsVariant(UnknownCallback{});
+ }
+ }
+
+ --mFiring;
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] Took %fms to fire timer callback\n", this,
+ (now - fireTime).ToMilliseconds()));
+}
+
+// See the big comment above GetTimerFiringsLog() to understand this code.
+void nsTimerImpl::LogFiring(const Callback& aCallback, uint8_t aType,
+ uint32_t aDelay) {
+ const char* typeStr;
+ switch (aType) {
+ case nsITimer::TYPE_ONE_SHOT:
+ typeStr = "ONE_SHOT ";
+ break;
+ case nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY:
+ typeStr = "ONE_LOW ";
+ break;
+ case nsITimer::TYPE_REPEATING_SLACK:
+ typeStr = "SLACK ";
+ break;
+ case nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY:
+ typeStr = "SLACK_LOW ";
+ break;
+ case nsITimer::TYPE_REPEATING_PRECISE: /* fall through */
+ case nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP:
+ typeStr = "PRECISE ";
+ break;
+ default:
+ MOZ_CRASH("bad type");
+ }
+
+ aCallback.match(
+ [&](const UnknownCallback&) {
+ MOZ_LOG(
+ GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] ??? timer (%s, %5d ms)\n", getpid(), typeStr, aDelay));
+ },
+ [&](const InterfaceCallback& i) {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] iface timer (%s %5d ms): %p\n", getpid(), typeStr,
+ aDelay, i.get()));
+ },
+ [&](const ObserverCallback& o) {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] obs timer (%s %5d ms): %p\n", getpid(), typeStr,
+ aDelay, o.get()));
+ },
+ [&](const FuncCallback& f) {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] fn timer (%s %5d ms): %s\n", getpid(), typeStr,
+ aDelay, f.mName));
+ },
+ [&](const ClosureCallback& c) {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] closure timer (%s %5d ms): %s\n", getpid(), typeStr,
+ aDelay, c.mName));
+ });
+}
+
+void nsTimerImpl::GetName(nsACString& aName,
+ const MutexAutoLock& aProofOfLock) {
+ GetCallback().match(
+ [&](const UnknownCallback&) { aName.AssignLiteral("Canceled_timer"); },
+ [&](const InterfaceCallback& i) {
+ if (nsCOMPtr<nsINamed> named = do_QueryInterface(i)) {
+ named->GetName(aName);
+ } else {
+ aName.AssignLiteral("Anonymous_interface_timer");
+ }
+ },
+ [&](const ObserverCallback& o) {
+ if (nsCOMPtr<nsINamed> named = do_QueryInterface(o)) {
+ named->GetName(aName);
+ } else {
+ aName.AssignLiteral("Anonymous_observer_timer");
+ }
+ },
+ [&](const FuncCallback& f) { aName.Assign(f.mName); },
+ [&](const ClosureCallback& c) { aName.Assign(c.mName); });
+}
+
+nsresult nsTimerImpl::GetName(nsACString& aName) {
+ MutexAutoLock lock(mMutex);
+ GetName(aName, lock);
+ return NS_OK;
+}
+
+nsTimer::~nsTimer() = default;
+
+size_t nsTimer::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
+ return aMallocSizeOf(this);
+}
+
+/* static */
+RefPtr<nsTimer> nsTimer::WithEventTarget(nsIEventTarget* aTarget) {
+ if (!aTarget) {
+ aTarget = mozilla::GetCurrentSerialEventTarget();
+ }
+ return do_AddRef(new nsTimer(aTarget));
+}
+
+/* static */
+nsresult nsTimer::XPCOMConstructor(REFNSIID aIID, void** aResult) {
+ *aResult = nullptr;
+ auto timer = WithEventTarget(nullptr);
+
+ return timer->QueryInterface(aIID, aResult);
+}
diff --git a/xpcom/threads/nsTimerImpl.h b/xpcom/threads/nsTimerImpl.h
new file mode 100644
index 0000000000..49f1fd00d5
--- /dev/null
+++ b/xpcom/threads/nsTimerImpl.h
@@ -0,0 +1,231 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsTimerImpl_h___
+#define nsTimerImpl_h___
+
+#include "nsITimer.h"
+#include "nsIEventTarget.h"
+#include "nsIObserver.h"
+
+#include "nsCOMPtr.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Variant.h"
+#include "mozilla/Logging.h"
+
+extern mozilla::LogModule* GetTimerLog();
+
+#define NS_TIMER_CID \
+ { /* 5ff24248-1dd2-11b2-8427-fbab44f29bc8 */ \
+ 0x5ff24248, 0x1dd2, 0x11b2, { \
+ 0x84, 0x27, 0xfb, 0xab, 0x44, 0xf2, 0x9b, 0xc8 \
+ } \
+ }
+
+class nsIObserver;
+
+namespace mozilla {
+class LogModule;
+}
+
+// TimerThread, nsTimerEvent, and nsTimer hold references to nsTimerImpl
+// objects. nsTimer has a separate lifecycle so we can Cancel() the underlying
+// timer when the user of the nsTimer has let go of its last reference.
+class nsTimerImpl {
+ ~nsTimerImpl() {
+ MOZ_ASSERT(!mIsInTimerThread);
+
+ // The nsITimer interface requires that its users keep a reference to the
+ // timers they use while those timers are initialized but have not yet
+ // fired. If this assert ever fails, it is a bug in the code that created
+ // and used the timer.
+ //
+ // Further, note that this should never fail even with a misbehaving user,
+ // because nsTimer::Release checks for a refcount of 1 with an armed timer
+ // (a timer whose only reference is from the timer thread) and when it hits
+ // this will remove the timer from the timer thread and thus destroy the
+ // last reference, preventing this situation from occurring.
+ MOZ_ASSERT(
+ mCallback.is<UnknownCallback>() || mEventTarget->IsOnCurrentThread(),
+ "Must not release mCallback off-target without canceling");
+ }
+
+ public:
+ typedef mozilla::TimeStamp TimeStamp;
+
+ nsTimerImpl(nsITimer* aTimer, nsIEventTarget* aTarget);
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(nsTimerImpl)
+ NS_DECL_NON_VIRTUAL_NSITIMER
+
+ static nsresult Startup();
+ static void Shutdown();
+
+ void SetDelayInternal(uint32_t aDelay, TimeStamp aBase = TimeStamp::Now());
+ void CancelImpl(bool aClearITimer);
+
+ void Fire(int32_t aGeneration);
+
+ int32_t GetGeneration() { return mGeneration; }
+
+ struct UnknownCallback {};
+
+ using InterfaceCallback = nsCOMPtr<nsITimerCallback>;
+
+ using ObserverCallback = nsCOMPtr<nsIObserver>;
+
+ /// A raw function pointer and its closed-over state, along with its name for
+ /// logging purposes.
+ struct FuncCallback {
+ nsTimerCallbackFunc mFunc;
+ void* mClosure;
+ const char* mName;
+ };
+
+ /// A callback defined by an owned closure and its name for logging purposes.
+ struct ClosureCallback {
+ std::function<void(nsITimer*)> mFunc;
+ const char* mName;
+ };
+
+ using Callback =
+ mozilla::Variant<UnknownCallback, InterfaceCallback, ObserverCallback,
+ FuncCallback, ClosureCallback>;
+
+ nsresult InitCommon(const mozilla::TimeDuration& aDelay, uint32_t aType,
+ Callback&& newCallback,
+ const mozilla::MutexAutoLock& aProofOfLock)
+ MOZ_REQUIRES(mMutex);
+
+ Callback& GetCallback() MOZ_REQUIRES(mMutex) {
+ mMutex.AssertCurrentThreadOwns();
+ return mCallback;
+ }
+
+ bool IsRepeating() const {
+ static_assert(nsITimer::TYPE_ONE_SHOT < nsITimer::TYPE_REPEATING_SLACK,
+ "invalid ordering of timer types!");
+ static_assert(
+ nsITimer::TYPE_REPEATING_SLACK < nsITimer::TYPE_REPEATING_PRECISE,
+ "invalid ordering of timer types!");
+ static_assert(nsITimer::TYPE_REPEATING_PRECISE <
+ nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP,
+ "invalid ordering of timer types!");
+ return mType >= nsITimer::TYPE_REPEATING_SLACK &&
+ mType < nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY;
+ }
+
+ bool IsLowPriority() const {
+ return mType == nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY ||
+ mType == nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY;
+ }
+
+ bool IsSlack() const {
+ return mType == nsITimer::TYPE_REPEATING_SLACK ||
+ mType == nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY;
+ }
+
+ void GetName(nsACString& aName, const mozilla::MutexAutoLock& aProofOfLock)
+ MOZ_REQUIRES(mMutex);
+
+ bool IsInTimerThread() const { return mIsInTimerThread; }
+ void SetIsInTimerThread(bool aIsInTimerThread) {
+ mIsInTimerThread = aIsInTimerThread;
+ }
+
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+
+ void LogFiring(const Callback& aCallback, uint8_t aType, uint32_t aDelay);
+
+ nsresult InitWithClosureCallback(std::function<void(nsITimer*)>&& aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType, const char* aNameString);
+
+ // Is this timer currently referenced from a TimerThread::Entry?
+ // Note: It is cleared before the Entry is destroyed. Take() also sets it to
+ // false, to indicate it's no longer in the TimerThread's list. This Take()
+ // call is NOT made under the nsTimerImpl's mutex (all other
+ // SetIsInTimerThread calls are under the mutex). However, ALL accesses to
+ // mIsInTimerThread are under the TimerThread's Monitor lock, so consistency
+ // is guaranteed by that.
+ bool mIsInTimerThread;
+
+  // These members are set by the initiating thread when the timer's type is
+  // changed, and during the period when it fires on that thread.
+ uint8_t mType;
+
+ // The generation number of this timer, re-generated each time the timer is
+ // initialized so one-shot timers can be canceled and re-initialized by the
+ // arming thread without any bad race conditions.
+ // Updated only after this timer has been removed from the timer thread.
+ int32_t mGeneration;
+
+ mozilla::TimeDuration mDelay MOZ_GUARDED_BY(mMutex);
+  // Never updated while in the TimerThread's timer list. Only updated
+  // before adding to that list or during nsTimerImpl::Fire(), when it has
+  // been removed from the TimerThread's list. TimerThread can safely access
+  // the mTimeout of any timer in the list.
+ mozilla::TimeStamp mTimeout;
+
+ RefPtr<nsITimer> mITimer MOZ_GUARDED_BY(mMutex);
+ mozilla::Mutex mMutex;
+ Callback mCallback MOZ_GUARDED_BY(mMutex);
+  // A counter, because in rare cases Fire() can be entered reentrantly.
+ unsigned int mFiring MOZ_GUARDED_BY(mMutex);
+
+ static mozilla::StaticMutex sDeltaMutex;
+ static double sDeltaSum MOZ_GUARDED_BY(sDeltaMutex);
+ static double sDeltaSumSquared MOZ_GUARDED_BY(sDeltaMutex);
+ static double sDeltaNum MOZ_GUARDED_BY(sDeltaMutex);
+};
+
+class nsTimer final : public nsITimer {
+ explicit nsTimer(nsIEventTarget* aTarget)
+ : mImpl(new nsTimerImpl(this, aTarget)) {}
+
+ virtual ~nsTimer();
+
+ public:
+ friend class TimerThread;
+ friend class nsTimerEvent;
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_FORWARD_SAFE_NSITIMER(mImpl);
+
+  // NOTE: This initializer is not exposed on `nsITimer` as NS_FORWARD_SAFE_
+  // does not support forwarding rvalue references.
+ nsresult InitWithClosureCallback(std::function<void(nsITimer*)>&& aCallback,
+ const mozilla::TimeDuration& aDelay,
+ uint32_t aType, const char* aNameString) {
+ return mImpl ? mImpl->InitWithClosureCallback(std::move(aCallback), aDelay,
+ aType, aNameString)
+ : NS_ERROR_NULL_POINTER;
+ }
+
+ // Create a timer targeting the given target. nullptr indicates that the
+ // current thread should be used as the timer's target.
+ static RefPtr<nsTimer> WithEventTarget(nsIEventTarget* aTarget);
+
+ static nsresult XPCOMConstructor(REFNSIID aIID, void** aResult);
+
+ private:
+ // nsTimerImpl holds a strong ref to us. When our refcount goes to 1, we will
+ // null this to break the cycle.
+ RefPtr<nsTimerImpl> mImpl;
+};
+
+class nsTimerManager final : public nsITimerManager {
+ public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSITIMERMANAGER
+ private:
+ ~nsTimerManager() = default;
+};
+
+#endif /* nsTimerImpl_h___ */