/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsThreadUtils.h"

#include "chrome/common/ipc_message.h"  // for IPC::Message
#include "LeakRefPtr.h"
#include "mozilla/Attributes.h"
#include "mozilla/Likely.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/TimeStamp.h"
#include "nsComponentManagerUtils.h"
#include "nsExceptionHandler.h"
#include "nsIEventTarget.h"
#include "nsITimer.h"
#include "nsString.h"
#include "nsThreadSyncDispatch.h"
#include "nsTimerImpl.h"
#include "prsystem.h"

#include "nsThreadManager.h"
#include "nsThreadPool.h"
#include "TaskController.h"

#ifdef XP_WIN
#  include <windows.h>
#elif defined(XP_MACOSX)
#  include <sys/resource.h>
#endif

#if defined(ANDROID)
#  include <sys/prctl.h>
#endif

static mozilla::LazyLogModule sEventDispatchAndRunLog("events");
#ifdef LOG1
#  undef LOG1
#endif
#define LOG1(args) \
  MOZ_LOG(sEventDispatchAndRunLog, mozilla::LogLevel::Error, args)
#define LOG1_ENABLED() \
  MOZ_LOG_TEST(sEventDispatchAndRunLog, mozilla::LogLevel::Error)

using namespace mozilla;

#ifndef XPCOM_GLUE_AVOID_NSPR

NS_IMPL_ISUPPORTS(IdlePeriod, nsIIdlePeriod)

NS_IMETHODIMP
IdlePeriod::GetIdlePeriodHint(TimeStamp* aIdleDeadline) {
  *aIdleDeadline = TimeStamp();
  return NS_OK;
}

// NS_IMPL_NAMED_* relies on the mName field, which is not present on
// release or beta. Instead, fall back to using "Runnable" for all
// runnables.
#  ifndef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMPL_ISUPPORTS(Runnable, nsIRunnable)
#  else
NS_IMPL_NAMED_ADDREF(Runnable, mName)
NS_IMPL_NAMED_RELEASE(Runnable, mName)
NS_IMPL_QUERY_INTERFACE(Runnable, nsIRunnable, nsINamed)
#  endif

NS_IMETHODIMP
Runnable::Run() {
  // Do nothing
  return NS_OK;
}

#  ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMETHODIMP
Runnable::GetName(nsACString& aName) {
  if (mName) {
    aName.AssignASCII(mName);
  } else {
    aName.Truncate();
  }
  return NS_OK;
}
#  endif

NS_IMPL_ISUPPORTS_INHERITED(DiscardableRunnable, Runnable,
                            nsIDiscardableRunnable)

NS_IMPL_ISUPPORTS_INHERITED(CancelableRunnable, DiscardableRunnable,
                            nsICancelableRunnable)

void CancelableRunnable::OnDiscard() {
  // Tasks that implement Cancel() can be safely cleaned up if it turns out
  // that the task will not run.
  (void)NS_WARN_IF(NS_FAILED(Cancel()));
}

NS_IMPL_ISUPPORTS_INHERITED(IdleRunnable, DiscardableRunnable, nsIIdleRunnable)

NS_IMPL_ISUPPORTS_INHERITED(CancelableIdleRunnable, CancelableRunnable,
                            nsIIdleRunnable)

NS_IMPL_ISUPPORTS_INHERITED(PrioritizableRunnable, Runnable,
                            nsIRunnablePriority)

PrioritizableRunnable::PrioritizableRunnable(
    already_AddRefed<nsIRunnable>&& aRunnable, uint32_t aPriority)
    // The real runnable name is provided by overriding the GetName function.
    : Runnable("PrioritizableRunnable"),
      mRunnable(std::move(aRunnable)),
      mPriority(aPriority) {
#  if DEBUG
  nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(mRunnable);
  MOZ_ASSERT(!runnablePrio);
#  endif
}

#  ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMETHODIMP
PrioritizableRunnable::GetName(nsACString& aName) {
  // Try to get a name from the underlying runnable.
  nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable);
  if (named) {
    named->GetName(aName);
  }
  return NS_OK;
}
#  endif

NS_IMETHODIMP
PrioritizableRunnable::Run() {
  MOZ_RELEASE_ASSERT(NS_IsMainThread());
  return mRunnable->Run();
}

NS_IMETHODIMP
PrioritizableRunnable::GetPriority(uint32_t* aPriority) {
  *aPriority = mPriority;
  return NS_OK;
}

already_AddRefed<nsIRunnable> mozilla::CreateRenderBlockingRunnable(
    already_AddRefed<nsIRunnable>&& aRunnable) {
  nsCOMPtr<nsIRunnable> runnable = new PrioritizableRunnable(
      std::move(aRunnable), nsIRunnablePriority::PRIORITY_RENDER_BLOCKING);
  return runnable.forget();
}
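
// Illustrative use of CreateRenderBlockingRunnable (a sketch; the runnable
// name and lambda body are hypothetical):
//
//   nsCOMPtr<nsIRunnable> task =
//       NS_NewRunnableFunction("Example::Paint", [] { /* urgent work */ });
//   NS_DispatchToMainThread(CreateRenderBlockingRunnable(task.forget()));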

NS_IMPL_ISUPPORTS_INHERITED(PrioritizableCancelableRunnable, CancelableRunnable,
                            nsIRunnablePriority)

NS_IMETHODIMP
PrioritizableCancelableRunnable::GetPriority(uint32_t* aPriority) {
  *aPriority = mPriority;
  return NS_OK;
}

#endif  // XPCOM_GLUE_AVOID_NSPR

//-----------------------------------------------------------------------------

nsresult NS_NewNamedThread(const nsACString& aName, nsIThread** aResult,
                           nsIRunnable* aInitialEvent,
                           nsIThreadManager::ThreadCreationOptions aOptions) {
  nsCOMPtr<nsIRunnable> event = aInitialEvent;
  return NS_NewNamedThread(aName, aResult, event.forget(), aOptions);
}

nsresult NS_NewNamedThread(const nsACString& aName, nsIThread** aResult,
                           already_AddRefed<nsIRunnable> aInitialEvent,
                           nsIThreadManager::ThreadCreationOptions aOptions) {
  nsCOMPtr<nsIRunnable> event = std::move(aInitialEvent);
  nsCOMPtr<nsIThread> thread;
  nsresult rv = nsThreadManager::get().nsThreadManager::NewNamedThread(
      aName, aOptions, getter_AddRefs(thread));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (event) {
    rv = thread->Dispatch(event.forget(), NS_DISPATCH_IGNORE_BLOCK_DISPATCH);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      return rv;
    }
  }

  *aResult = nullptr;
  thread.swap(*aResult);
  return NS_OK;
}
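
// Illustrative use of NS_NewNamedThread (a sketch; the thread name, runnable
// name and lambda body are hypothetical):
//
//   nsCOMPtr<nsIRunnable> init =
//       NS_NewRunnableFunction("Example::Init", [] { /* first event */ });
//   nsCOMPtr<nsIThread> thread;
//   nsresult rv = NS_NewNamedThread("ExampleWorker"_ns,
//                                   getter_AddRefs(thread), init.forget());
//   // ... later, once the thread is no longer needed:
//   //   thread->Shutdown();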

nsresult NS_GetCurrentThread(nsIThread** aResult) {
  return nsThreadManager::get().nsThreadManager::GetCurrentThread(aResult);
}

nsresult NS_GetMainThread(nsIThread** aResult) {
  return nsThreadManager::get().nsThreadManager::GetMainThread(aResult);
}

nsresult NS_DispatchToCurrentThread(already_AddRefed<nsIRunnable>&& aEvent) {
  nsresult rv;
  nsCOMPtr<nsIRunnable> event(aEvent);
  // XXX: Consider using GetCurrentSerialEventTarget() to support TaskQueues.
  nsISerialEventTarget* thread = NS_GetCurrentThread();
  if (!thread) {
    return NS_ERROR_UNEXPECTED;
  }
  // To keep us from leaking the runnable if the dispatch method fails,
  // we hold a raw pointer and release it manually on failure.
  nsIRunnable* temp = event.get();
  rv = thread->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // Dispatch() leaked the reference to the event, but given the caller's
    // assumptions, we shouldn't leak here. And since we are on the same
    // thread as the dispatch target, it's mostly safe to release it here.
    NS_RELEASE(temp);
  }
  return rv;
}

// It is common to call NS_DispatchToCurrentThread with a newly allocated
// runnable whose refcount is zero. To keep us from leaking the runnable if
// the dispatch method fails, we take a death grip.
nsresult NS_DispatchToCurrentThread(nsIRunnable* aEvent) {
  nsCOMPtr<nsIRunnable> event(aEvent);
  return NS_DispatchToCurrentThread(event.forget());
}
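
// Illustrative use of NS_DispatchToCurrentThread (a sketch; the runnable name
// and lambda body are hypothetical):
//
//   NS_DispatchToCurrentThread(NS_NewRunnableFunction(
//       "Example::Task", [] { /* runs later on this thread's event loop */ }));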

nsresult NS_DispatchToMainThread(already_AddRefed<nsIRunnable>&& aEvent,
                                 uint32_t aDispatchFlags) {
  LeakRefPtr<nsIRunnable> event(std::move(aEvent));
  nsCOMPtr<nsIThread> thread;
  nsresult rv = NS_GetMainThread(getter_AddRefs(thread));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    NS_ASSERTION(false,
                 "Failed NS_DispatchToMainThread() in shutdown; leaking");
    // NOTE: if you stop leaking here, adjust Promise::MaybeReportRejected(),
    // which assumes a leak here, or split into leaks and no-leaks versions
    return rv;
  }
  return thread->Dispatch(event.take(), aDispatchFlags);
}

// If dispatch of a newly allocated runnable with a refcount of zero fails,
// we intentionally leak the runnable: it is likely being dispatched to the
// main thread because it owns main-thread-only objects, so it is not safe
// to release them here.
nsresult NS_DispatchToMainThread(nsIRunnable* aEvent, uint32_t aDispatchFlags) {
  nsCOMPtr<nsIRunnable> event(aEvent);
  return NS_DispatchToMainThread(event.forget(), aDispatchFlags);
}
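
// Illustrative use of NS_DispatchToMainThread (a sketch; the runnable name and
// lambda body are hypothetical):
//
//   NS_DispatchToMainThread(NS_NewRunnableFunction(
//       "Example::UpdateState", [] { MOZ_ASSERT(NS_IsMainThread()); }));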

nsresult NS_DelayedDispatchToCurrentThread(
    already_AddRefed<nsIRunnable>&& aEvent, uint32_t aDelayMs) {
  nsCOMPtr<nsIRunnable> event(aEvent);

  // XXX: Consider using GetCurrentSerialEventTarget() to support TaskQueues.
  nsISerialEventTarget* thread = NS_GetCurrentThread();
  if (!thread) {
    return NS_ERROR_UNEXPECTED;
  }

  return thread->DelayedDispatch(event.forget(), aDelayMs);
}
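
// Illustrative use of NS_DelayedDispatchToCurrentThread (a sketch; the
// runnable name, lambda body and delay are hypothetical):
//
//   NS_DelayedDispatchToCurrentThread(
//       NS_NewRunnableFunction("Example::Later", [] { /* ... */ }),
//       1000);  // run roughly one second from now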

nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
                                  nsIThread* aThread,
                                  EventQueuePriority aQueue) {
  nsresult rv;
  nsCOMPtr<nsIRunnable> event(aEvent);
  NS_ENSURE_TRUE(event, NS_ERROR_INVALID_ARG);
  if (!aThread) {
    return NS_ERROR_UNEXPECTED;
  }
  // To keep us from leaking the runnable if the dispatch method fails,
  // we hold a raw pointer and release it manually on failure.
  nsIRunnable* temp = event.get();
  rv = aThread->DispatchToQueue(event.forget(), aQueue);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // DispatchToQueue() leaked the reference to the event, but given the
    // caller's assumptions, we shouldn't leak here. And since we are on the
    // same thread as the dispatch target, it's mostly safe to release it here.
    NS_RELEASE(temp);
  }

  return rv;
}

nsresult NS_DispatchToCurrentThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
                                         EventQueuePriority aQueue) {
  return NS_DispatchToThreadQueue(std::move(aEvent), NS_GetCurrentThread(),
                                  aQueue);
}
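
// Illustrative use of NS_DispatchToCurrentThreadQueue (a sketch; the runnable
// name and lambda body are hypothetical):
//
//   NS_DispatchToCurrentThreadQueue(
//       NS_NewRunnableFunction("Example::IdleWork", [] { /* ... */ }),
//       EventQueuePriority::Idle);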

extern nsresult NS_DispatchToMainThreadQueue(
    already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aQueue) {
  nsCOMPtr<nsIThread> mainThread;
  nsresult rv = NS_GetMainThread(getter_AddRefs(mainThread));
  if (NS_SUCCEEDED(rv)) {
    return NS_DispatchToThreadQueue(std::move(aEvent), mainThread, aQueue);
  }
  return rv;
}

class IdleRunnableWrapper final : public Runnable,
                                  public nsIDiscardableRunnable,
                                  public nsIIdleRunnable {
 public:
  explicit IdleRunnableWrapper(already_AddRefed<nsIRunnable>&& aEvent)
      : Runnable("IdleRunnableWrapper"),
        mRunnable(std::move(aEvent)),
        mDiscardable(do_QueryInterface(mRunnable)) {}

  NS_DECL_ISUPPORTS_INHERITED

  NS_IMETHOD Run() override {
    if (!mRunnable) {
      return NS_OK;
    }
    CancelTimer();
    // Don't clear mDiscardable because that would cause QueryInterface to
    // change behavior during the lifetime of an instance.
    nsCOMPtr<nsIRunnable> runnable = std::move(mRunnable);
    return runnable->Run();
  }

  // nsIDiscardableRunnable
  void OnDiscard() override {
    if (!mRunnable) {
      // Run() was already called from TimedOut().
      return;
    }
    mDiscardable->OnDiscard();
    mRunnable = nullptr;
  }

  static void TimedOut(nsITimer* aTimer, void* aClosure) {
    RefPtr<IdleRunnableWrapper> runnable =
        static_cast<IdleRunnableWrapper*>(aClosure);
    LogRunnable::Run log(runnable);
    runnable->Run();
    runnable = nullptr;
  }

  void SetTimer(uint32_t aDelay, nsIEventTarget* aTarget) override {
    MOZ_ASSERT(aTarget);
    MOZ_ASSERT(!mTimer);
    NS_NewTimerWithFuncCallback(getter_AddRefs(mTimer), TimedOut, this, aDelay,
                                nsITimer::TYPE_ONE_SHOT,
                                "IdleRunnableWrapper::SetTimer", aTarget);
  }

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
  NS_IMETHOD GetName(nsACString& aName) override {
    aName.AssignLiteral("IdleRunnableWrapper");
    if (nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable)) {
      nsAutoCString name;
      named->GetName(name);
      if (!name.IsEmpty()) {
        aName.AppendLiteral(" for ");
        aName.Append(name);
      }
    }
    return NS_OK;
  }
#endif

 private:
  ~IdleRunnableWrapper() { CancelTimer(); }

  void CancelTimer() {
    if (mTimer) {
      mTimer->Cancel();
    }
  }

  nsCOMPtr<nsITimer> mTimer;
  nsCOMPtr<nsIRunnable> mRunnable;
  nsCOMPtr<nsIDiscardableRunnable> mDiscardable;
};

NS_IMPL_ADDREF_INHERITED(IdleRunnableWrapper, Runnable)
NS_IMPL_RELEASE_INHERITED(IdleRunnableWrapper, Runnable)

NS_INTERFACE_MAP_BEGIN(IdleRunnableWrapper)
  NS_INTERFACE_MAP_ENTRY(nsIIdleRunnable)
  NS_INTERFACE_MAP_ENTRY_CONDITIONAL(nsIDiscardableRunnable, mDiscardable)
NS_INTERFACE_MAP_END_INHERITING(Runnable)

extern nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
                                         uint32_t aTimeout, nsIThread* aThread,
                                         EventQueuePriority aQueue) {
  nsCOMPtr<nsIRunnable> event(std::move(aEvent));
  NS_ENSURE_TRUE(event, NS_ERROR_INVALID_ARG);
  MOZ_ASSERT(aQueue == EventQueuePriority::Idle ||
             aQueue == EventQueuePriority::DeferredTimers);
  if (!aThread) {
    return NS_ERROR_UNEXPECTED;
  }

  nsCOMPtr<nsIIdleRunnable> idleEvent = do_QueryInterface(event);

  if (!idleEvent) {
    idleEvent = new IdleRunnableWrapper(event.forget());
    event = do_QueryInterface(idleEvent);
    MOZ_DIAGNOSTIC_ASSERT(event);
  }
  idleEvent->SetTimer(aTimeout, aThread);

  nsresult rv = NS_DispatchToThreadQueue(event.forget(), aThread, aQueue);
  if (NS_SUCCEEDED(rv)) {
    // This is intended to pair with the "DISP" log emitted from inside
    // NS_DispatchToThreadQueue for the `event`. There is no possibility of
    // another "DISP" for a different event being injected on this thread.
    LOG1(("TIMEOUT %u", aTimeout));
  }

  return rv;
}

extern nsresult NS_DispatchToCurrentThreadQueue(
    already_AddRefed<nsIRunnable>&& aEvent, uint32_t aTimeout,
    EventQueuePriority aQueue) {
  return NS_DispatchToThreadQueue(std::move(aEvent), aTimeout,
                                  NS_GetCurrentThread(), aQueue);
}
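
// Illustrative use of the timeout variant above (a sketch; the runnable name,
// lambda body and timeout are hypothetical). If no idle slot opens up within
// the timeout, the wrapper's timer runs the event anyway:
//
//   NS_DispatchToCurrentThreadQueue(
//       NS_NewRunnableFunction("Example::IdleOrTimeout", [] { /* ... */ }),
//       2000 /* ms */, EventQueuePriority::Idle);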

#ifndef XPCOM_GLUE_AVOID_NSPR
nsresult NS_ProcessPendingEvents(nsIThread* aThread, PRIntervalTime aTimeout) {
  nsresult rv = NS_OK;

  if (!aThread) {
    aThread = NS_GetCurrentThread();
    if (NS_WARN_IF(!aThread)) {
      return NS_ERROR_UNEXPECTED;
    }
  }

  PRIntervalTime start = PR_IntervalNow();
  for (;;) {
    bool processedEvent;
    rv = aThread->ProcessNextEvent(false, &processedEvent);
    if (NS_FAILED(rv) || !processedEvent) {
      break;
    }
    if (PR_IntervalNow() - start > aTimeout) {
      break;
    }
  }
  return rv;
}
#endif  // XPCOM_GLUE_AVOID_NSPR
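
// Illustrative use of NS_ProcessPendingEvents (a sketch; the timeout is
// hypothetical): drain already-queued events on the current thread, giving up
// after roughly 50 ms.
//
//   NS_ProcessPendingEvents(nullptr, PR_MillisecondsToInterval(50));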

inline bool hasPendingEvents(nsIThread* aThread) {
  bool val;
  return NS_SUCCEEDED(aThread->HasPendingEvents(&val)) && val;
}

bool NS_HasPendingEvents(nsIThread* aThread) {
  if (!aThread) {
    aThread = NS_GetCurrentThread();
    if (NS_WARN_IF(!aThread)) {
      return false;
    }
  }
  return hasPendingEvents(aThread);
}

bool NS_ProcessNextEvent(nsIThread* aThread, bool aMayWait) {
  if (!aThread) {
    aThread = NS_GetCurrentThread();
    if (NS_WARN_IF(!aThread)) {
      return false;
    }
  }
  bool val;
  return NS_SUCCEEDED(aThread->ProcessNextEvent(aMayWait, &val)) && val;
}

void NS_SetCurrentThreadName(const char* aName) {
  PR_SetCurrentThreadName(aName);
#if defined(ANDROID) && defined(DEBUG)
  // Check nspr does the right thing on Android.
  char buffer[16] = {'\0'};
  prctl(PR_GET_NAME, buffer);
  MOZ_ASSERT(0 == strncmp(buffer, aName, 15));
#endif
  if (nsThreadManager::get().IsNSThread()) {
    nsThread* thread = nsThreadManager::get().GetCurrentThread();
    thread->SetThreadNameInternal(nsDependentCString(aName));
  }
}

nsIThread* NS_GetCurrentThread() {
  return nsThreadManager::get().GetCurrentThread();
}

nsIThread* NS_GetCurrentThreadNoCreate() {
  if (nsThreadManager::get().IsNSThread()) {
    return NS_GetCurrentThread();
  }
  return nullptr;
}

// nsThreadPoolNaming
nsCString nsThreadPoolNaming::GetNextThreadName(const nsACString& aPoolName) {
  nsCString name(aPoolName);
  name.AppendLiteral(" #");
  name.AppendInt(++mCounter, 10);  // The counter is declared as atomic
  return name;
}

nsresult NS_DispatchBackgroundTask(already_AddRefed<nsIRunnable> aEvent,
                                   uint32_t aDispatchFlags) {
  nsCOMPtr<nsIRunnable> event(aEvent);
  return nsThreadManager::get().DispatchToBackgroundThread(event,
                                                           aDispatchFlags);
}
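
// Illustrative use of NS_DispatchBackgroundTask (a sketch; the runnable name
// and lambda body are hypothetical):
//
//   NS_DispatchBackgroundTask(
//       NS_NewRunnableFunction("Example::CompressFile",
//                              [] { /* may block on I/O */ }),
//       NS_DISPATCH_EVENT_MAY_BLOCK);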

// nsAutoLowPriorityIO
nsAutoLowPriorityIO::nsAutoLowPriorityIO() {
#if defined(XP_WIN)
  lowIOPrioritySet =
      SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_BEGIN);
#elif defined(XP_MACOSX)
  oldPriority = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD);
  lowIOPrioritySet =
      oldPriority != -1 &&
      setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, IOPOL_THROTTLE) != -1;
#else
  lowIOPrioritySet = false;
#endif
}

nsAutoLowPriorityIO::~nsAutoLowPriorityIO() {
#if defined(XP_WIN)
  if (MOZ_LIKELY(lowIOPrioritySet)) {
    // On Windows the old thread priority is automatically restored
    SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_END);
  }
#elif defined(XP_MACOSX)
  if (MOZ_LIKELY(lowIOPrioritySet)) {
    setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, oldPriority);
  }
#endif
}
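
// Illustrative use of nsAutoLowPriorityIO (a sketch):
//
//   {
//     nsAutoLowPriorityIO lowPriority;  // lower I/O priority for this scope
//     // ... perform bulk disk I/O ...
//   }  // destructor restores the previous priority where supported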

namespace mozilla {

nsISerialEventTarget* GetCurrentSerialEventTarget() {
  if (nsISerialEventTarget* current =
          SerialEventTargetGuard::GetCurrentSerialEventTarget()) {
    return current;
  }

  MOZ_DIAGNOSTIC_ASSERT(!nsThreadPool::GetCurrentThreadPool(),
                        "Call to GetCurrentSerialEventTarget() from thread "
                        "pool without an active TaskQueue");

  nsCOMPtr<nsIThread> thread;
  nsresult rv = NS_GetCurrentThread(getter_AddRefs(thread));
  if (NS_FAILED(rv)) {
    return nullptr;
  }

  return thread;
}
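
// Illustrative use of GetCurrentSerialEventTarget (a sketch; the runnable name
// is hypothetical):
//
//   nsCOMPtr<nsISerialEventTarget> target = GetCurrentSerialEventTarget();
//   target->Dispatch(
//       NS_NewRunnableFunction("Example::SameTarget", [] { /* ... */ }),
//       NS_DISPATCH_NORMAL);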

nsISerialEventTarget* GetMainThreadSerialEventTarget() {
  return static_cast<nsThread*>(nsThreadManager::get().GetMainThreadWeak());
}

size_t GetNumberOfProcessors() {
#if defined(XP_LINUX) && defined(MOZ_SANDBOX)
  static const PRInt32 procs = PR_GetNumberOfProcessors();
#else
  PRInt32 procs = PR_GetNumberOfProcessors();
#endif
  MOZ_ASSERT(procs > 0);
  return static_cast<size_t>(procs);
}

template <typename T>
void LogTaskBase<T>::LogDispatch(T* aEvent) {
  LOG1(("DISP %p", aEvent));
}
template <typename T>
void LogTaskBase<T>::LogDispatch(T* aEvent, void* aContext) {
  LOG1(("DISP %p (%p)", aEvent, aContext));
}

template <>
void LogTaskBase<IPC::Message>::LogDispatchWithPid(IPC::Message* aEvent,
                                                   int32_t aPid) {
  if (aEvent->seqno() && aPid > 0) {
    LOG1(("SEND %p %d %d", aEvent, aEvent->seqno(), aPid));
  }
}

template <typename T>
LogTaskBase<T>::Run::Run(T* aEvent, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  // Log the address of this RAII object so we can use it to match the DONE
  // log without keeping a reference to the event, which may be invalid by the
  // time the destructor runs.
  LOG1(("EXEC %p %p", aEvent, this));
}
template <typename T>
LogTaskBase<T>::Run::Run(T* aEvent, void* aContext, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  LOG1(("EXEC %p (%p) %p", aEvent, aContext, this));
}

template <>
LogTaskBase<nsIRunnable>::Run::Run(nsIRunnable* aEvent, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  if (!LOG1_ENABLED()) {
    return;
  }

  nsCOMPtr<nsINamed> named(do_QueryInterface(aEvent));
  if (!named) {
    LOG1(("EXEC %p %p", aEvent, this));
    return;
  }

  nsAutoCString name;
  named->GetName(name);
  LOG1(("EXEC %p %p [%s]", aEvent, this, name.BeginReading()));
}

template <>
LogTaskBase<Task>::Run::Run(Task* aTask, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  if (!LOG1_ENABLED()) {
    return;
  }

  nsAutoCString name;
  if (!aTask->GetName(name)) {
    LOG1(("EXEC %p %p", aTask, this));
    return;
  }

  LOG1(("EXEC %p %p [%s]", aTask, this, name.BeginReading()));
}

template <>
LogTaskBase<IPC::Message>::Run::Run(IPC::Message* aMessage, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  LOG1(("RECV %p %p %d [%s]", aMessage, this, aMessage->seqno(),
        aMessage->name()));
}

template <>
LogTaskBase<nsTimerImpl>::Run::Run(nsTimerImpl* aEvent, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  // The name of the timer will be logged when running it on the target thread.
  // Logging it here (on the `Timer` thread) would be redundant.
  LOG1(("EXEC %p %p [nsTimerImpl]", aEvent, this));
}

template <typename T>
LogTaskBase<T>::Run::~Run() {
  LOG1((mWillRunAgain ? "INTERRUPTED %p" : "DONE %p", this));
}

template class LogTaskBase<nsIRunnable>;
template class LogTaskBase<MicroTaskRunnable>;
template class LogTaskBase<IPC::Message>;
template class LogTaskBase<nsTimerImpl>;
template class LogTaskBase<Task>;
template class LogTaskBase<PresShell>;
template class LogTaskBase<dom::FrameRequestCallback>;

MOZ_THREAD_LOCAL(nsISerialEventTarget*)
SerialEventTargetGuard::sCurrentThreadTLS;
void SerialEventTargetGuard::InitTLS() {
  MOZ_ASSERT(NS_IsMainThread());
  if (!sCurrentThreadTLS.init()) {
    MOZ_CRASH();
  }
}

}  // namespace mozilla

bool nsIEventTarget::IsOnCurrentThread() {
  if (mThread) {
    return mThread == PR_GetCurrentThread();
  }
  return IsOnCurrentThreadInfallible();
}

extern "C" {
// These functions use the C language linkage because they're exposed to Rust
// via the xpcom/rust/moz_task crate, which wraps them in safe Rust functions
// that enable Rust code to get/create threads and dispatch runnables on them.

nsresult NS_GetCurrentThreadRust(nsIThread** aResult) {
  return NS_GetCurrentThread(aResult);
}

nsresult NS_GetMainThreadRust(nsIThread** aResult) {
  return NS_GetMainThread(aResult);
}

// NS_NewNamedThread's aOptions parameter has a default argument, but we can't
// omit default arguments when calling a C++ function from Rust, and we can't
// easily construct the default value on the Rust side, since it is a C++ type
// defined alongside nsIThreadManager. So we indirect through this function,
// which forwards only the required arguments.
nsresult NS_NewNamedThreadWithDefaultStackSize(const nsACString& aName,
                                               nsIThread** aResult,
                                               nsIRunnable* aEvent) {
  return NS_NewNamedThread(aName, aResult, aEvent);
}

bool NS_IsOnCurrentThread(nsIEventTarget* aTarget) {
  return aTarget->IsOnCurrentThread();
}

nsresult NS_DispatchBackgroundTask(nsIRunnable* aEvent,
                                   uint32_t aDispatchFlags) {
  return nsThreadManager::get().DispatchToBackgroundThread(aEvent,
                                                           aDispatchFlags);
}

nsresult NS_CreateBackgroundTaskQueue(const char* aName,
                                      nsISerialEventTarget** aTarget) {
  nsCOMPtr<nsISerialEventTarget> target =
      nsThreadManager::get().CreateBackgroundTaskQueue(aName);
  if (!target) {
    return NS_ERROR_FAILURE;
  }

  target.forget(aTarget);
  return NS_OK;
}
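
// Illustrative use of NS_CreateBackgroundTaskQueue (a sketch; the queue and
// runnable names are hypothetical):
//
//   nsCOMPtr<nsISerialEventTarget> queue;
//   if (NS_SUCCEEDED(NS_CreateBackgroundTaskQueue("ExampleQueue",
//                                                 getter_AddRefs(queue)))) {
//     queue->Dispatch(
//         NS_NewRunnableFunction("Example::BackgroundWork", [] { /* ... */ }),
//         NS_DISPATCH_NORMAL);
//   }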

}  // extern "C"

nsresult NS_DispatchAndSpinEventLoopUntilComplete(
    const nsACString& aVeryGoodReasonToDoThis, nsIEventTarget* aEventTarget,
    already_AddRefed<nsIRunnable> aEvent) {
  // NOTE: Get the current thread specifically, as `SpinEventLoopUntil` can
  // only spin that event target's loop. The reply will specify
  // NS_DISPATCH_IGNORE_BLOCK_DISPATCH to ensure the reply is received even if
  // the caller is a threadpool thread.
  nsCOMPtr<nsIThread> current = NS_GetCurrentThread();
  if (NS_WARN_IF(!current)) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  RefPtr<nsThreadSyncDispatch> wrapper =
      new nsThreadSyncDispatch(current.forget(), std::move(aEvent));
  nsresult rv = aEventTarget->Dispatch(do_AddRef(wrapper));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // FIXME: Consider avoiding leaking the `nsThreadSyncDispatch` as well by
    // using a fallible version of `Dispatch` once that is added.
    return rv;
  }

  wrapper->SpinEventLoopUntilComplete(aVeryGoodReasonToDoThis);
  return NS_OK;
}
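
// Illustrative use of NS_DispatchAndSpinEventLoopUntilComplete (a sketch;
// `targetThread`, the reason string, and the runnable are hypothetical):
//
//   nsresult rv = NS_DispatchAndSpinEventLoopUntilComplete(
//       "Example: flush pending writes before shutdown"_ns, targetThread,
//       NS_NewRunnableFunction("Example::Flush", [] { /* ... */ }));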