author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6
tree      f435a8308119effd964b339f76abb83a57c29483  /security/sandbox/chromium/base/synchronization
parent    Initial commit.
Adding upstream version 124.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'security/sandbox/chromium/base/synchronization')
 security/sandbox/chromium/base/synchronization/atomic_flag.h               |  50 +
 security/sandbox/chromium/base/synchronization/condition_variable.h        | 135 +
 security/sandbox/chromium/base/synchronization/condition_variable_posix.cc | 149 +
 security/sandbox/chromium/base/synchronization/lock.cc                     |  38 +
 security/sandbox/chromium/base/synchronization/lock.h                      | 133 +
 security/sandbox/chromium/base/synchronization/lock_impl.h                 | 175 +
 security/sandbox/chromium/base/synchronization/lock_impl_posix.cc          | 133 +
 security/sandbox/chromium/base/synchronization/lock_impl_win.cc            |  40 +
 security/sandbox/chromium/base/synchronization/waitable_event.h            | 291 +
 security/sandbox/chromium/base/synchronization/waitable_event_posix.cc     | 445 +
 10 files changed, 1589 insertions(+), 0 deletions(-)
diff --git a/security/sandbox/chromium/base/synchronization/atomic_flag.h b/security/sandbox/chromium/base/synchronization/atomic_flag.h
new file mode 100644
index 0000000000..f386a16cbd
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/atomic_flag.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+#define BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+// A flag that can safely be set from one thread and read from other threads.
+//
+// This class IS NOT intended for synchronization between threads.
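+//
+// A minimal usage sketch (illustrative only; |cancel_flag_| and the
+// surrounding wiring are hypothetical, not part of this header):
+//
+//   // On the owning sequence:
+//   cancel_flag_.Set();
+//
+//   // On any other thread:
+//   if (cancel_flag_.IsSet())
+//     return;  // The acquire load in IsSet() pairs with the store in Set().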
+class BASE_EXPORT AtomicFlag {
+ public:
+ AtomicFlag();
+ ~AtomicFlag();
+
+ // Set the flag. Must always be called from the same sequence.
+ void Set();
+
+ // Returns true iff the flag was set. If this returns true, the current thread
+ // is guaranteed to be synchronized with all memory operations on the sequence
+ // which invoked Set() up until at least the first call to Set() on it.
+ bool IsSet() const {
+ // Inline here: this has a measurable performance impact on base::WeakPtr.
+ return flag_.load(std::memory_order_acquire) != 0;
+ }
+
+ // Resets the flag. Be careful when using this: callers might not expect
+ // IsSet() to return false after returning true once.
+ void UnsafeResetForTesting();
+
+ private:
+ std::atomic<uint_fast8_t> flag_{0};
+ SEQUENCE_CHECKER(set_sequence_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
diff --git a/security/sandbox/chromium/base/synchronization/condition_variable.h b/security/sandbox/chromium/base/synchronization/condition_variable.h
new file mode 100644
index 0000000000..d92b738081
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/condition_variable.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ConditionVariable wraps pthreads condition variable synchronization or, on
+// Windows, simulates it. This functionality is very helpful for having
+// several threads wait for an event, as is common with a thread pool managed
+// by a master. The meaning of such an event in the (worker) thread pool
+// scenario is that additional tasks are now available for processing. It is
+// used in Chrome in the DNS prefetching system to notify worker threads that
+// a queue now has items (tasks) which need to be tended to. A related use
+// would have a pool manager waiting on a ConditionVariable, waiting for a
+// thread in the pool to announce (signal) that there is now more room in a
+// (bounded size) communications queue for the manager to deposit tasks, or,
+// as a second example, that the queue of tasks is completely empty and all
+// workers are waiting.
+//
+// USAGE NOTE 1: spurious signal events are possible with this and
+// most implementations of condition variables. As a result, be
+// *sure* to retest your condition before proceeding. The following
+// is a good example of doing this correctly:
+//
+// while (!work_to_be_done()) Wait(...);
+//
+// In contrast do NOT do the following:
+//
+// if (!work_to_be_done()) Wait(...); // Don't do this.
+//
+// Especially avoid the above if you are relying on some other thread only
+// issuing a signal *if* there is work to do. There can/will
+// be spurious signals. Recheck state on waiting thread before
+// assuming the signal was intentional. Caveat caller ;-).
+//
+// USAGE NOTE 2: Broadcast() frees up all waiting threads at once,
+// which leads to contention for the locks they all held when they
+// called Wait(). This results in POOR performance. A much better
+// approach to getting a lot of threads out of Wait() is to have each
+// thread (upon exiting Wait()) call Signal() to free up another
+// Wait'ing thread. Look at condition_variable_unittest.cc for
+// both examples.
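+//
+// A sketch of that chained-Signal pattern (illustrative; |lock_|, |cv_| and
+// work_to_be_done() are hypothetical):
+//
+//   {
+//     AutoLock auto_lock(lock_);
+//     while (!work_to_be_done())
+//       cv_.Wait();
+//     cv_.Signal();  // Chain: wake one more waiter instead of Broadcast().
+//   }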
+//
+// Broadcast() can be used nicely during teardown, as it gets the job
+// done, and leaves no sleeping threads... and performance is less
+// critical at that point.
+//
+// The semantics of Broadcast() are carefully crafted so that *all*
+// threads that were waiting when the request was made will indeed
+// get signaled. Some implementations mess up, and don't signal them
+// all, while others allow the wait to be effectively turned off (for
+// a while, while waiting threads come around). This implementation
+// appears correct, as it will not "lose" any signals, and will guarantee
+// that all threads get signaled by Broadcast().
+//
+// This implementation offers support for "performance" in its selection of
+// which thread to revive. Performance, in direct contrast with "fairness,"
+// assures that the thread that most recently began to Wait() is selected by
+// Signal to revive. Fairness would (if publicly supported) assure that the
+// thread that has Wait()ed the longest is selected. The default policy
+// may improve performance, as the selected thread may have a greater chance of
+// having some of its stack data in various CPU caches.
+
+#ifndef BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+#define BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+class BASE_EXPORT ConditionVariable {
+ public:
+ // Construct a cv for use with ONLY one user lock.
+ explicit ConditionVariable(Lock* user_lock);
+
+ ~ConditionVariable();
+
+ // Wait() releases the caller's critical section atomically as it starts to
+  // sleep, and then reacquires it when it is signaled. The wait functions are
+ // susceptible to spurious wakeups. (See usage note 1 for more details.)
+ void Wait();
+ void TimedWait(const TimeDelta& max_time);
+
+ // Broadcast() revives all waiting threads. (See usage note 2 for more
+ // details.)
+ void Broadcast();
+ // Signal() revives one waiting thread.
+ void Signal();
+
+ // Declares that this ConditionVariable will only ever be used by a thread
+ // that is idle at the bottom of its stack and waiting for work (in
+ // particular, it is not synchronously waiting on this ConditionVariable
+ // before resuming ongoing work). This is useful to avoid telling
+ // base-internals that this thread is "blocked" when it's merely idle and
+ // ready to do work. As such, this is only expected to be used by thread and
+ // thread pool impls.
+ void declare_only_used_while_idle() { waiting_is_blocking_ = false; }
+
+ private:
+#if defined(OS_WIN)
+ CHROME_CONDITION_VARIABLE cv_;
+ CHROME_SRWLOCK* const srwlock_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ pthread_cond_t condition_;
+ pthread_mutex_t* user_mutex_;
+#endif
+
+#if DCHECK_IS_ON()
+ base::Lock* const user_lock_; // Needed to adjust shadow lock state on wait.
+#endif
+
+  // Whether a thread invoking Wait() on this ConditionVariable should be
+ // considered blocked as opposed to idle (and potentially replaced if part of
+ // a pool).
+ bool waiting_is_blocking_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
diff --git a/security/sandbox/chromium/base/synchronization/condition_variable_posix.cc b/security/sandbox/chromium/base/synchronization/condition_variable_posix.cc
new file mode 100644
index 0000000000..189eb360d2
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/condition_variable_posix.cc
@@ -0,0 +1,149 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/condition_variable.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/time.h>
+
+#include "base/optional.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+ConditionVariable::ConditionVariable(Lock* user_lock)
+ : user_mutex_(user_lock->lock_.native_handle())
+#if DCHECK_IS_ON()
+ , user_lock_(user_lock)
+#endif
+{
+ int rv = 0;
+ // http://crbug.com/293736
+ // NaCl doesn't support monotonic clock based absolute deadlines.
+ // On older Android platform versions, it's supported through the
+ // non-standard pthread_cond_timedwait_monotonic_np. Newer platform
+ // versions have pthread_condattr_setclock.
+ // Mac can use relative time deadlines.
+#if !defined(OS_MACOSX) && !defined(OS_NACL) && \
+ !(defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+ pthread_condattr_t attrs;
+ rv = pthread_condattr_init(&attrs);
+ DCHECK_EQ(0, rv);
+ pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
+ rv = pthread_cond_init(&condition_, &attrs);
+ pthread_condattr_destroy(&attrs);
+#else
+ rv = pthread_cond_init(&condition_, NULL);
+#endif
+ DCHECK_EQ(0, rv);
+}
+
+ConditionVariable::~ConditionVariable() {
+#if defined(OS_MACOSX)
+ // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+ // Darwin kernel. http://crbug.com/517681.
+ {
+ base::Lock lock;
+ base::AutoLock l(lock);
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ pthread_cond_timedwait_relative_np(&condition_, lock.lock_.native_handle(),
+ &ts);
+ }
+#endif
+
+ int rv = pthread_cond_destroy(&condition_);
+ DCHECK_EQ(0, rv);
+}
+
+void ConditionVariable::Wait() {
+ Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
+ scoped_blocking_call;
+ if (waiting_is_blocking_)
+ scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
+
+#if DCHECK_IS_ON()
+ user_lock_->CheckHeldAndUnmark();
+#endif
+ int rv = pthread_cond_wait(&condition_, user_mutex_);
+ DCHECK_EQ(0, rv);
+#if DCHECK_IS_ON()
+ user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+ Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
+ scoped_blocking_call;
+ if (waiting_is_blocking_)
+ scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
+
+ int64_t usecs = max_time.InMicroseconds();
+ struct timespec relative_time;
+ relative_time.tv_sec = usecs / Time::kMicrosecondsPerSecond;
+ relative_time.tv_nsec =
+ (usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
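+  // For example, a |max_time| of 1500ms yields relative_time.tv_sec == 1 and
+  // relative_time.tv_nsec == 500000000 (500ms expressed in nanoseconds).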
+
+#if DCHECK_IS_ON()
+ user_lock_->CheckHeldAndUnmark();
+#endif
+
+#if defined(OS_MACOSX)
+ int rv = pthread_cond_timedwait_relative_np(
+ &condition_, user_mutex_, &relative_time);
+#else
+ // The timeout argument to pthread_cond_timedwait is in absolute time.
+ struct timespec absolute_time;
+#if defined(OS_NACL)
+ // See comment in constructor for why this is different in NaCl.
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ absolute_time.tv_sec = now.tv_sec;
+ absolute_time.tv_nsec = now.tv_usec * Time::kNanosecondsPerMicrosecond;
+#else
+ struct timespec now;
+ clock_gettime(CLOCK_MONOTONIC, &now);
+ absolute_time.tv_sec = now.tv_sec;
+ absolute_time.tv_nsec = now.tv_nsec;
+#endif
+
+ absolute_time.tv_sec += relative_time.tv_sec;
+ absolute_time.tv_nsec += relative_time.tv_nsec;
+ absolute_time.tv_sec += absolute_time.tv_nsec / Time::kNanosecondsPerSecond;
+ absolute_time.tv_nsec %= Time::kNanosecondsPerSecond;
+ DCHECK_GE(absolute_time.tv_sec, now.tv_sec); // Overflow paranoia
+
+#if defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+ int rv = pthread_cond_timedwait_monotonic_np(
+ &condition_, user_mutex_, &absolute_time);
+#else
+ int rv = pthread_cond_timedwait(&condition_, user_mutex_, &absolute_time);
+#endif // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+#endif // OS_MACOSX
+
+  // On failure, we only expect the CV to time out. Any other error value means
+ // that we've unexpectedly woken up.
+ DCHECK(rv == 0 || rv == ETIMEDOUT);
+#if DCHECK_IS_ON()
+ user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+void ConditionVariable::Broadcast() {
+ int rv = pthread_cond_broadcast(&condition_);
+ DCHECK_EQ(0, rv);
+}
+
+void ConditionVariable::Signal() {
+ int rv = pthread_cond_signal(&condition_);
+ DCHECK_EQ(0, rv);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/synchronization/lock.cc b/security/sandbox/chromium/base/synchronization/lock.cc
new file mode 100644
index 0000000000..03297ada52
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is used for debugging assertion support. The Lock class
+// is functionally a wrapper around the LockImpl class, so the only
+// real intelligence in the class is in the debugging logic.
+
+#include "base/synchronization/lock.h"
+
+#if DCHECK_IS_ON()
+
+namespace base {
+
+Lock::Lock() : lock_() {
+}
+
+Lock::~Lock() {
+ DCHECK(owning_thread_ref_.is_null());
+}
+
+void Lock::AssertAcquired() const {
+ DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+}
+
+void Lock::CheckHeldAndUnmark() {
+ DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+ owning_thread_ref_ = PlatformThreadRef();
+}
+
+void Lock::CheckUnheldAndMark() {
+ DCHECK(owning_thread_ref_.is_null());
+ owning_thread_ref_ = PlatformThread::CurrentRef();
+}
+
+} // namespace base
+
+#endif // DCHECK_IS_ON()
diff --git a/security/sandbox/chromium/base/synchronization/lock.h b/security/sandbox/chromium/base/synchronization/lock.h
new file mode 100644
index 0000000000..00095ab3af
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock.h
@@ -0,0 +1,133 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_H_
+#define BASE_SYNCHRONIZATION_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/thread_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// A convenient wrapper for an OS specific critical section. The only real
+// intelligence in this class is its debug-mode support for the
+// AssertAcquired() method.
+class LOCKABLE BASE_EXPORT Lock {
+ public:
+#if !DCHECK_IS_ON()
+ // Optimized wrapper implementation
+ Lock() : lock_() {}
+ ~Lock() {}
+
+ // TODO(lukasza): https://crbug.com/831825: Add EXCLUSIVE_LOCK_FUNCTION
+ // annotation to Acquire method and similar annotations to Release and Try
+ // methods (here and in the #else branch).
+ void Acquire() { lock_.Lock(); }
+ void Release() { lock_.Unlock(); }
+
+ // If the lock is not held, take it and return true. If the lock is already
+ // held by another thread, immediately return false. This must not be called
+ // by a thread already holding the lock (what happens is undefined and an
+ // assertion may fail).
+ bool Try() { return lock_.Try(); }
+
+ // Null implementation if not debug.
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
+#else
+ Lock();
+ ~Lock();
+
+ // NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
+ // a thread attempts to acquire the lock a second time (while already holding
+ // it).
+ void Acquire() {
+ lock_.Lock();
+ CheckUnheldAndMark();
+ }
+ void Release() {
+ CheckHeldAndUnmark();
+ lock_.Unlock();
+ }
+
+ bool Try() {
+ bool rv = lock_.Try();
+ if (rv) {
+ CheckUnheldAndMark();
+ }
+ return rv;
+ }
+
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK();
+#endif // DCHECK_IS_ON()
+
+ // Whether Lock mitigates priority inversion when used from different thread
+ // priorities.
+ static bool HandlesMultipleThreadPriorities() {
+#if defined(OS_WIN)
+ // Windows mitigates priority inversion by randomly boosting the priority of
+ // ready threads.
+ // https://msdn.microsoft.com/library/windows/desktop/ms684831.aspx
+ return true;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // POSIX mitigates priority inversion by setting the priority of a thread
+ // holding a Lock to the maximum priority of any other thread waiting on it.
+ return internal::LockImpl::PriorityInheritanceAvailable();
+#else
+#error Unsupported platform
+#endif
+ }
+
+ // Both Windows and POSIX implementations of ConditionVariable need to be
+ // able to see our lock and tweak our debugging counters, as they release and
+ // acquire locks inside of their condition variable APIs.
+ friend class ConditionVariable;
+
+ private:
+#if DCHECK_IS_ON()
+  // Members and routines taking care of lock assertions.
+  // Note that this checks for recursive locks and allows them
+  // if the variable is set. This is allowed by the underlying implementation
+  // on Windows but not on POSIX, so we're doing unneeded checks on POSIX.
+  // It's worth it to share the code.
+ void CheckHeldAndUnmark();
+ void CheckUnheldAndMark();
+
+ // All private data is implicitly protected by lock_.
+ // Be VERY careful to only access members under that lock.
+ base::PlatformThreadRef owning_thread_ref_;
+#endif // DCHECK_IS_ON()
+
+ // Platform specific underlying lock implementation.
+ internal::LockImpl lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(Lock);
+};
+
+// A helper class that acquires the given Lock while the AutoLock is in scope.
+using AutoLock = internal::BasicAutoLock<Lock>;
+
+// AutoUnlock is a helper that will Release() the |lock| argument in the
+// constructor, and re-Acquire() it in the destructor.
+using AutoUnlock = internal::BasicAutoUnlock<Lock>;
+
+// Like AutoLock but is a no-op when the provided Lock* is null. Inspired by
+// absl::MutexLockMaybe. Use this instead of base::Optional<base::AutoLock> to
+// get around -Wthread-safety-analysis warnings for conditional locking.
+using AutoLockMaybe = internal::BasicAutoLockMaybe<Lock>;
+
+// Like AutoLock but permits Release() of its mutex before destruction.
+// Release() may be called at most once. Inspired by
+// absl::ReleasableMutexLock. Use this instead of base::Optional<base::AutoLock>
+// to get around -Wthread-safety-analysis warnings for AutoLocks that are
+// explicitly released early (prefer proper scoping to this).
+using ReleasableAutoLock = internal::BasicReleasableAutoLock<Lock>;
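+//
+// A usage sketch for ReleasableAutoLock (illustrative; DoSlowWork() and
+// |counter_| are hypothetical):
+//
+//   void Update(Lock& lock) {
+//     ReleasableAutoLock scoped(&lock);
+//     counter_++;
+//     scoped.Release();  // Explicit early release of |lock|.
+//     DoSlowWork();      // Runs without holding |lock|.
+//   }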
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_LOCK_H_
diff --git a/security/sandbox/chromium/base/synchronization/lock_impl.h b/security/sandbox/chromium/base/synchronization/lock_impl.h
new file mode 100644
index 0000000000..830b878e8e
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock_impl.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+#define BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/thread_annotations.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#include <pthread.h>
+#include <string.h>  // for strerror()
+#endif
+
+namespace base {
+namespace internal {
+
+// This class implements the underlying platform-specific lock mechanism
+// (an SRWLOCK on Windows, a pthread_mutex_t on POSIX) used for the Lock
+// class. Most users should not use LockImpl directly, but should instead
+// use Lock.
+class BASE_EXPORT LockImpl {
+ public:
+#if defined(OS_WIN)
+ using NativeHandle = CHROME_SRWLOCK;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ using NativeHandle = pthread_mutex_t;
+#endif
+
+ LockImpl();
+ ~LockImpl();
+
+ // If the lock is not held, take it and return true. If the lock is already
+ // held by something else, immediately return false.
+ bool Try();
+
+ // Take the lock, blocking until it is available if necessary.
+ void Lock();
+
+ // Release the lock. This must only be called by the lock's holder: after
+ // a successful call to Try, or a call to Lock.
+ inline void Unlock();
+
+ // Return the native underlying lock.
+ // TODO(awalker): refactor lock and condition variables so that this is
+ // unnecessary.
+ NativeHandle* native_handle() { return &native_handle_; }
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // Whether this lock will attempt to use priority inheritance.
+ static bool PriorityInheritanceAvailable();
+#endif
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockImpl);
+};
+
+#if defined(OS_WIN)
+void LockImpl::Unlock() {
+ ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+void LockImpl::Unlock() {
+ int rv = pthread_mutex_unlock(&native_handle_);
+ DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+#endif
+
+// This is an implementation used for AutoLock templated on the lock type.
+template <class LockType>
+class SCOPED_LOCKABLE BasicAutoLock {
+ public:
+ struct AlreadyAcquired {};
+
+ explicit BasicAutoLock(LockType& lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ lock_.Acquire();
+ }
+
+ BasicAutoLock(LockType& lock, const AlreadyAcquired&)
+ EXCLUSIVE_LOCKS_REQUIRED(lock)
+ : lock_(lock) {
+ lock_.AssertAcquired();
+ }
+
+ ~BasicAutoLock() UNLOCK_FUNCTION() {
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ private:
+ LockType& lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicAutoLock);
+};
+
+// This is an implementation used for AutoUnlock templated on the lock type.
+template <class LockType>
+class BasicAutoUnlock {
+ public:
+ explicit BasicAutoUnlock(LockType& lock) : lock_(lock) {
+ // We require our caller to have the lock.
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ ~BasicAutoUnlock() { lock_.Acquire(); }
+
+ private:
+ LockType& lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicAutoUnlock);
+};
+
+// This is an implementation used for AutoLockMaybe templated on the lock type.
+template <class LockType>
+class SCOPED_LOCKABLE BasicAutoLockMaybe {
+ public:
+ explicit BasicAutoLockMaybe(LockType* lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ if (lock_)
+ lock_->Acquire();
+ }
+
+ ~BasicAutoLockMaybe() UNLOCK_FUNCTION() {
+ if (lock_) {
+ lock_->AssertAcquired();
+ lock_->Release();
+ }
+ }
+
+ private:
+ LockType* const lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicAutoLockMaybe);
+};
+
+// This is an implementation used for ReleasableAutoLock templated on the lock
+// type.
+template <class LockType>
+class SCOPED_LOCKABLE BasicReleasableAutoLock {
+ public:
+ explicit BasicReleasableAutoLock(LockType* lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ DCHECK(lock_);
+ lock_->Acquire();
+ }
+
+ ~BasicReleasableAutoLock() UNLOCK_FUNCTION() {
+ if (lock_) {
+ lock_->AssertAcquired();
+ lock_->Release();
+ }
+ }
+
+ void Release() UNLOCK_FUNCTION() {
+ DCHECK(lock_);
+ lock_->AssertAcquired();
+ lock_->Release();
+ lock_ = nullptr;
+ }
+
+ private:
+ LockType* lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicReleasableAutoLock);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_LOCK_IMPL_H_
diff --git a/security/sandbox/chromium/base/synchronization/lock_impl_posix.cc b/security/sandbox/chromium/base/synchronization/lock_impl_posix.cc
new file mode 100644
index 0000000000..7571f68a9a
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock_impl_posix.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include <string>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/posix/safe_strerror.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/synchronization_buildflags.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+#if DCHECK_IS_ON()
+const char* AdditionalHintForSystemErrorCode(int error_code) {
+ switch (error_code) {
+ case EINVAL:
+ return "Hint: This is often related to a use-after-free.";
+ default:
+ return "";
+ }
+}
+#endif // DCHECK_IS_ON()
+
+std::string SystemErrorCodeToString(int error_code) {
+#if DCHECK_IS_ON()
+ return base::safe_strerror(error_code) + ". " +
+ AdditionalHintForSystemErrorCode(error_code);
+#else // DCHECK_IS_ON()
+ return std::string();
+#endif // DCHECK_IS_ON()
+}
+
+} // namespace
+
+// Determines which platforms can consider using priority inheritance locks. Use
+// this define for platform code that may not compile if priority inheritance
+// locks aren't available. For this platform code,
+// PRIORITY_INHERITANCE_LOCKS_POSSIBLE() is a necessary but insufficient check.
+// Lock::PriorityInheritanceAvailable still must be checked as the code may
+// compile but the underlying platform still may not correctly support priority
+// inheritance locks.
+#if defined(OS_NACL) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 0
+#else
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
+#endif
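+
+// A sketch of the intended combined check (illustrative):
+//
+//   #if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
+//   if (LockImpl::PriorityInheritanceAvailable()) {
+//     // May rely on priority inheritance here.
+//   }
+//   #endif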
+
+LockImpl::LockImpl() {
+ pthread_mutexattr_t mta;
+ int rv = pthread_mutexattr_init(&mta);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
+ if (PriorityInheritanceAvailable()) {
+ rv = pthread_mutexattr_setprotocol(&mta, PTHREAD_PRIO_INHERIT);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+ }
+#endif
+#ifndef NDEBUG
+  // In debug, set up attributes for lock error checking.
+ rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+#endif
+ rv = pthread_mutex_init(&native_handle_, &mta);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+ rv = pthread_mutexattr_destroy(&mta);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+LockImpl::~LockImpl() {
+ int rv = pthread_mutex_destroy(&native_handle_);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+bool LockImpl::Try() {
+ int rv = pthread_mutex_trylock(&native_handle_);
+ DCHECK(rv == 0 || rv == EBUSY) << ". " << SystemErrorCodeToString(rv);
+ return rv == 0;
+}
+
+void LockImpl::Lock() {
+ // The ScopedLockAcquireActivity below is relatively expensive and so its
+ // actions can become significant due to the very large number of locks
+ // that tend to be used throughout the build. To avoid this cost in the
+ // vast majority of the calls, simply "try" the lock first and only do the
+ // (tracked) blocking call if that fails. Since "try" itself is a system
+ // call, and thus also somewhat expensive, don't bother with it unless
+ // tracking is actually enabled.
+ if (base::debug::GlobalActivityTracker::IsEnabled())
+ if (Try())
+ return;
+
+ base::debug::ScopedLockAcquireActivity lock_activity(this);
+ int rv = pthread_mutex_lock(&native_handle_);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+// static
+bool LockImpl::PriorityInheritanceAvailable() {
+#if BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE)
+ return true;
+#elif PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
+ return true;
+#else
+ // Security concerns prevent the use of priority inheritance mutexes on Linux.
+ // * CVE-2010-0622 - Linux < 2.6.33-rc7, wake_futex_pi possible DoS.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
+ // * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
+ // * CVE-2014-3153 - Linux <= 3.14.5, futex_requeue, privilege escalation.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3153
+ //
+ // If the above were all addressed, we still need a runtime check to deal with
+ // the bug below.
+ // * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
+ // Fixed in glibc 2.17.
+ // Priority inheritance mutexes may deadlock with condition variables
+ // during reacquisition of the mutex after the condition variable is
+ // signalled.
+ return false;
+#endif
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/synchronization/lock_impl_win.cc b/security/sandbox/chromium/base/synchronization/lock_impl_win.cc
new file mode 100644
index 0000000000..e0c4e9d7fc
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock_impl_win.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include "base/debug/activity_tracker.h"
+
+#include <windows.h>
+
+namespace base {
+namespace internal {
+
+LockImpl::LockImpl() : native_handle_(SRWLOCK_INIT) {}
+
+LockImpl::~LockImpl() = default;
+
+bool LockImpl::Try() {
+ return !!::TryAcquireSRWLockExclusive(
+ reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+
+void LockImpl::Lock() {
+ // The ScopedLockAcquireActivity below is relatively expensive and so its
+ // actions can become significant due to the very large number of locks
+ // that tend to be used throughout the build. To avoid this cost in the
+ // vast majority of the calls, simply "try" the lock first and only do the
+ // (tracked) blocking call if that fails. Since "try" itself is a system
+ // call, and thus also somewhat expensive, don't bother with it unless
+ // tracking is actually enabled.
+ if (base::debug::GlobalActivityTracker::IsEnabled())
+ if (Try())
+ return;
+
+ base::debug::ScopedLockAcquireActivity lock_activity(this);
+ ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/synchronization/waitable_event.h b/security/sandbox/chromium/base/synchronization/waitable_event.h
new file mode 100644
index 0000000000..8f78084e0d
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/waitable_event.h
@@ -0,0 +1,291 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#elif defined(OS_MACOSX)
+#include <mach/mach.h>
+
+#include <list>
+#include <memory>
+
+#include "base/callback_forward.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <list>
+#include <utility>
+
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+// A WaitableEvent can be a useful thread synchronization tool when you want to
+// allow one thread to wait for another thread to finish some work. For
+// non-Windows systems, this can only be used from within a single address
+// space.
+//
+// Use a WaitableEvent when you would otherwise use a Lock+ConditionVariable to
+// protect a simple boolean value. However, if you find yourself using a
+// WaitableEvent in conjunction with a Lock to wait for a more complex state
+// change (e.g., for an item to be added to a queue), then you should probably
+// be using a ConditionVariable instead of a WaitableEvent.
+//
+// NOTE: On Windows, this class provides a subset of the functionality afforded
+// by a Windows event object. This is intentional. If you are writing Windows
+// specific code and you need other features of a Windows event, then you might
+// be better off just using a Windows event directly.
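+//
+// Minimal usage sketch (illustrative; PostToOtherThread() is hypothetical):
+//
+//   WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
+//                      WaitableEvent::InitialState::NOT_SIGNALED);
+//   PostToOtherThread(base::BindOnce(&WaitableEvent::Signal,
+//                                    base::Unretained(&done)));
+//   done.Wait();  // Returns once the other thread has run Signal().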
+class BASE_EXPORT WaitableEvent {
+ public:
+ // Indicates whether a WaitableEvent should automatically reset the event
+ // state after a single waiting thread has been released or remain signaled
+ // until Reset() is manually invoked.
+ enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+ // Indicates whether a new WaitableEvent should start in a signaled state or
+ // not.
+ enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+ // Constructs a WaitableEvent with policy and initial state as detailed in
+ // the above enums.
+ WaitableEvent(ResetPolicy reset_policy = ResetPolicy::MANUAL,
+ InitialState initial_state = InitialState::NOT_SIGNALED);
+
+#if defined(OS_WIN)
+ // Create a WaitableEvent from an Event HANDLE which has already been
+  // created. This object takes ownership of the HANDLE and will close it when
+ // deleted.
+ explicit WaitableEvent(win::ScopedHandle event_handle);
+#endif
+
+ ~WaitableEvent();
+
+ // Put the event in the un-signaled state.
+ void Reset();
+
+  // Put the event in the signaled state, causing any thread blocked on Wait
+  // to be woken up.
+ void Signal();
+
+ // Returns true if the event is in the signaled state, else false. If this
+ // is not a manual reset event, then this test will cause a reset.
+ bool IsSignaled();
+
+ // Wait indefinitely for the event to be signaled. Wait's return "happens
+ // after" |Signal| has completed. This means that it's safe for a
+ // WaitableEvent to synchronise its own destruction, like this:
+ //
+ // WaitableEvent *e = new WaitableEvent;
+ // SendToOtherThread(e);
+ // e->Wait();
+ // delete e;
+ void Wait();
+
+ // Wait up until wait_delta has passed for the event to be signaled
+ // (real-time; ignores time overrides). Returns true if the event was
+ // signaled. Handles spurious wakeups and guarantees that |wait_delta| will
+ // have elapsed if this returns false.
+ //
+ // TimedWait can synchronise its own destruction like |Wait|.
+ bool TimedWait(const TimeDelta& wait_delta);
+
+#if defined(OS_WIN)
+ HANDLE handle() const { return handle_.Get(); }
+#endif
+
+ // Declares that this WaitableEvent will only ever be used by a thread that is
+ // idle at the bottom of its stack and waiting for work (in particular, it is
+ // not synchronously waiting on this event before resuming ongoing work). This
+ // is useful to avoid telling base-internals that this thread is "blocked"
+ // when it's merely idle and ready to do work. As such, this is only expected
+ // to be used by thread and thread pool impls.
+ void declare_only_used_while_idle() { waiting_is_blocking_ = false; }
+
+ // Wait, synchronously, on multiple events.
+ // waitables: an array of WaitableEvent pointers
+ // count: the number of elements in @waitables
+ //
+ // returns: the index of a WaitableEvent which has been signaled.
+ //
+ // You MUST NOT delete any of the WaitableEvent objects while this wait is
+ // happening, however WaitMany's return "happens after" the |Signal| call
+ // that caused it has completed, like |Wait|.
+ //
+ // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
+ // index among them is returned.
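+  //
+  // Example (illustrative; the two events are hypothetical):
+  //
+  //   WaitableEvent* waitables[] = {&cancel_event, &data_ready_event};
+  //   size_t index = WaitableEvent::WaitMany(waitables, 2);
+  //   // index == 0 means |cancel_event| was signaled (if both fired, the
+  //   // lowest index wins).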
+ static size_t WaitMany(WaitableEvent** waitables, size_t count);
+
+ // For asynchronous waiting, see WaitableEventWatcher
+
+ // This is a private helper class. It's here because it's used by friends of
+ // this class (such as WaitableEventWatcher) to be able to enqueue elements
+ // of the wait-list
+ class Waiter {
+ public:
+ // Signal the waiter to wake up.
+ //
+ // Consider the case of a Waiter which is in multiple WaitableEvent's
+ // wait-lists. Each WaitableEvent is automatic-reset and two of them are
+ // signaled at the same time. Now, each will wake only the first waiter in
+ // the wake-list before resetting. However, if those two waiters happen to
+ // be the same object (as can happen if another thread didn't have a chance
+ // to dequeue the waiter from the other wait-list in time), two auto-resets
+ // will have happened, but only one waiter has been signaled!
+ //
+ // Because of this, a Waiter may "reject" a wake by returning false. In
+ // this case, the auto-reset WaitableEvent shouldn't act as if anything has
+ // been notified.
+ virtual bool Fire(WaitableEvent* signaling_event) = 0;
+
+ // Waiters may implement this in order to provide an extra condition for
+ // two Waiters to be considered equal. In WaitableEvent::Dequeue, if the
+ // pointers match then this function is called as a final check. See the
+ // comments in ~Handle for why.
+ virtual bool Compare(void* tag) = 0;
+
+ protected:
+ virtual ~Waiter() = default;
+ };
+
+ private:
+ friend class WaitableEventWatcher;
+
+#if defined(OS_WIN)
+ win::ScopedHandle handle_;
+#elif defined(OS_MACOSX)
+ // Prior to macOS 10.12, a TYPE_MACH_RECV dispatch source may not be invoked
+ // immediately. If a WaitableEventWatcher is used on a manual-reset event,
+ // and another thread that is Wait()ing on the event calls Reset()
+ // immediately after waking up, the watcher may not receive the callback.
+ // On macOS 10.12 and higher, dispatch delivery is reliable. But for OSes
+ // prior, a lock-protected list of callbacks is used for manual-reset event
+ // watchers. Automatic-reset events are not prone to this issue, since the
+ // first thread to wake will claim the event.
+ static bool UseSlowWatchList(ResetPolicy policy);
+
+ // Peeks the message queue named by |port| and returns true if a message
+  // is present and false if not. If |dequeue| is true, the message will be
+ // drained from the queue. If |dequeue| is false, the queue will only be
+ // peeked. |port| must be a receive right.
+ static bool PeekPort(mach_port_t port, bool dequeue);
+
+ // The Mach receive right is waited on by both WaitableEvent and
+ // WaitableEventWatcher. It is valid to signal and then delete an event, and
+ // a watcher should still be notified. If the right were to be destroyed
+ // immediately, the watcher would not receive the signal. Because Mach
+ // receive rights cannot have a user refcount greater than one, the right
+ // must be reference-counted manually.
+ class ReceiveRight : public RefCountedThreadSafe<ReceiveRight> {
+ public:
+ ReceiveRight(mach_port_t name, bool create_slow_watch_list);
+
+ mach_port_t Name() const { return right_.get(); }
+
+ // This structure is used iff UseSlowWatchList() is true. See the comment
+ // in Signal() for details.
+ struct WatchList {
+ WatchList();
+ ~WatchList();
+
+ // The lock protects a list of closures to be run when the event is
+ // Signal()ed. The closures are invoked on the signaling thread, so they
+ // must be safe to be called from any thread.
+ Lock lock;
+ std::list<OnceClosure> list;
+ };
+
+ WatchList* SlowWatchList() const { return slow_watch_list_.get(); }
+
+ private:
+ friend class RefCountedThreadSafe<ReceiveRight>;
+ ~ReceiveRight();
+
+ mac::ScopedMachReceiveRight right_;
+
+ // This is allocated iff UseSlowWatchList() is true. It is created on the
+ // heap to avoid performing initialization when not using the slow path.
+ std::unique_ptr<WatchList> slow_watch_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReceiveRight);
+ };
+
+ const ResetPolicy policy_;
+
+ // The receive right for the event.
+ scoped_refptr<ReceiveRight> receive_right_;
+
+ // The send right used to signal the event. This can be disposed of with
+ // the event, unlike the receive right, since a deleted event cannot be
+ // signaled.
+ mac::ScopedMachSendRight send_right_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // On Windows, you must not close a HANDLE which is currently being waited on.
+ // The MSDN documentation says that the resulting behaviour is 'undefined'.
+ // To solve that issue each WaitableEventWatcher duplicates the given event
+ // handle.
+
+ // However, if we were to include the following members
+ // directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an
+ // event which gets deleted. This mismatch has bitten us several times now,
+ // so we have a kernel of the WaitableEvent, which is reference counted.
+ // WaitableEventWatchers may then take a reference and thus match the Windows
+ // behaviour.
+ struct WaitableEventKernel :
+ public RefCountedThreadSafe<WaitableEventKernel> {
+ public:
+ WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
+
+ bool Dequeue(Waiter* waiter, void* tag);
+
+ base::Lock lock_;
+ const bool manual_reset_;
+ bool signaled_;
+ std::list<Waiter*> waiters_;
+
+ private:
+ friend class RefCountedThreadSafe<WaitableEventKernel>;
+ ~WaitableEventKernel();
+ };
+
+ typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex;
+
+ // When dealing with arrays of WaitableEvent*, we want to sort by the address
+ // of the WaitableEvent in order to have a globally consistent locking order.
+ // In that case we keep them, in sorted order, in an array of pairs where the
+ // second element is the index of the WaitableEvent in the original,
+ // unsorted, array.
+ static size_t EnqueueMany(WaiterAndIndex* waitables,
+ size_t count, Waiter* waiter);
+
+ bool SignalAll();
+ bool SignalOne();
+ void Enqueue(Waiter* waiter);
+
+ scoped_refptr<WaitableEventKernel> kernel_;
+#endif
+
+ // Whether a thread invoking Wait() on this WaitableEvent should be considered
+ // blocked as opposed to idle (and potentially replaced if part of a pool).
+ bool waiting_is_blocking_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
diff --git a/security/sandbox/chromium/base/synchronization/waitable_event_posix.cc b/security/sandbox/chromium/base/synchronization/waitable_event_posix.cc
new file mode 100644
index 0000000000..effa899191
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/waitable_event_posix.cc
@@ -0,0 +1,445 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/optional.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/time/time_override.h"
+
+// -----------------------------------------------------------------------------
+// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
+// support cross-process events (where one process can signal an event which
+// others are waiting on). Because of this, we can avoid having one thread per
+// listener in several cases.
+//
+// The WaitableEvent maintains a list of waiters, protected by a lock. Each
+// waiter is either an async wait, in which case we have a Task and the
+// MessageLoop to run it on, or a blocking wait, in which case we have the
+// condition variable to signal.
+//
+// Waiting involves grabbing the lock and adding oneself to the wait list. Async
+// waits can be canceled, which means grabbing the lock and removing oneself
+// from the list.
+//
+// Waiting on multiple events is handled by adding a single, synchronous wait to
+// the wait-list of many events. An event passes a pointer to itself when
+// firing a waiter and so we can store that pointer to find out which event
+// triggered.
+// -----------------------------------------------------------------------------
+
+namespace base {
+
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+ InitialState initial_state)
+ : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
+
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+ base::AutoLock locked(kernel_->lock_);
+ kernel_->signaled_ = false;
+}
+
+void WaitableEvent::Signal() {
+ base::AutoLock locked(kernel_->lock_);
+
+ if (kernel_->signaled_)
+ return;
+
+ if (kernel_->manual_reset_) {
+ SignalAll();
+ kernel_->signaled_ = true;
+ } else {
+    // In the case of auto reset, if no waiter was woken, leave the event
+    // signaled so that the next call to Wait() or IsSignaled() consumes it.
+ if (!SignalOne())
+ kernel_->signaled_ = true;
+ }
+}
+
+bool WaitableEvent::IsSignaled() {
+ base::AutoLock locked(kernel_->lock_);
+
+ const bool result = kernel_->signaled_;
+ if (result && !kernel_->manual_reset_)
+ kernel_->signaled_ = false;
+ return result;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waits
+
+// -----------------------------------------------------------------------------
+// This is a synchronous waiter. The thread is waiting on the given condition
+// variable and the fired flag in this object.
+// -----------------------------------------------------------------------------
+class SyncWaiter : public WaitableEvent::Waiter {
+ public:
+ SyncWaiter()
+ : fired_(false), signaling_event_(nullptr), lock_(), cv_(&lock_) {}
+
+ bool Fire(WaitableEvent* signaling_event) override {
+ base::AutoLock locked(lock_);
+
+ if (fired_)
+ return false;
+
+ fired_ = true;
+ signaling_event_ = signaling_event;
+
+ cv_.Broadcast();
+
+ // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on
+ // the blocking thread's stack. There is no |delete this;| in Fire. The
+ // SyncWaiter object is destroyed when it goes out of scope.
+
+ return true;
+ }
+
+ WaitableEvent* signaling_event() const {
+ return signaling_event_;
+ }
+
+ // ---------------------------------------------------------------------------
+ // These waiters are always stack allocated and don't delete themselves. Thus
+ // there's no problem and the ABA tag is the same as the object pointer.
+ // ---------------------------------------------------------------------------
+ bool Compare(void* tag) override { return this == tag; }
+
+ // ---------------------------------------------------------------------------
+ // Called with lock held.
+ // ---------------------------------------------------------------------------
+ bool fired() const {
+ return fired_;
+ }
+
+ // ---------------------------------------------------------------------------
+ // During a TimedWait, we need a way to make sure that an auto-reset
+ // WaitableEvent doesn't think that this event has been signaled between
+ // unlocking it and removing it from the wait-list. Called with lock held.
+ // ---------------------------------------------------------------------------
+ void Disable() {
+ fired_ = true;
+ }
+
+ base::Lock* lock() {
+ return &lock_;
+ }
+
+ base::ConditionVariable* cv() {
+ return &cv_;
+ }
+
+ private:
+ bool fired_;
+ WaitableEvent* signaling_event_; // The WaitableEvent which woke us
+ base::Lock lock_;
+ base::ConditionVariable cv_;
+};
+
+void WaitableEvent::Wait() {
+ bool result = TimedWait(TimeDelta::Max());
+ DCHECK(result) << "TimedWait() should never fail with infinite timeout";
+}
+
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+ if (wait_delta <= TimeDelta())
+ return IsSignaled();
+
+ // Record the event that this thread is blocking upon (for hang diagnosis) and
+ // consider it blocked for scheduling purposes. Ignore this for non-blocking
+ // WaitableEvents.
+ Optional<debug::ScopedEventWaitActivity> event_activity;
+ Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
+ scoped_blocking_call;
+ if (waiting_is_blocking_) {
+ event_activity.emplace(this);
+ scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
+ }
+
+ kernel_->lock_.Acquire();
+ if (kernel_->signaled_) {
+ if (!kernel_->manual_reset_) {
+ // In this case we were signaled when we had no waiters. Now that
+ // someone has waited upon us, we can automatically reset.
+ kernel_->signaled_ = false;
+ }
+
+ kernel_->lock_.Release();
+ return true;
+ }
+
+ SyncWaiter sw;
+ if (!waiting_is_blocking_)
+ sw.cv()->declare_only_used_while_idle();
+ sw.lock()->Acquire();
+
+ Enqueue(&sw);
+ kernel_->lock_.Release();
+ // We are violating locking order here by holding the SyncWaiter lock but not
+ // the WaitableEvent lock. However, this is safe because we don't lock |lock_|
+ // again before unlocking it.
+
+ // TimeTicks takes care of overflow but we special case is_max() nonetheless
+ // to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily (same for
+ // the increment step of the for loop if the condition variable returns
+ // early). Ref: https://crbug.com/910524#c7
+ const TimeTicks end_time =
+ wait_delta.is_max() ? TimeTicks::Max()
+ : subtle::TimeTicksNowIgnoringOverride() + wait_delta;
+ for (TimeDelta remaining = wait_delta; remaining > TimeDelta() && !sw.fired();
+ remaining = end_time.is_max()
+ ? TimeDelta::Max()
+ : end_time - subtle::TimeTicksNowIgnoringOverride()) {
+ if (end_time.is_max())
+ sw.cv()->Wait();
+ else
+ sw.cv()->TimedWait(remaining);
+ }
+
+ // Get the SyncWaiter signaled state before releasing the lock.
+ const bool return_value = sw.fired();
+
+  // We can't acquire |lock_| before releasing the SyncWaiter lock (because of
+  // locking order); in between the two, a signal could be fired which |sw|
+  // would accept even though we will still return false, so the signal would
+  // be lost on an auto-reset WaitableEvent. Thus we call Disable, which makes
+  // sw::Fire return false.
+ sw.Disable();
+ sw.lock()->Release();
+
+ // This is a bug that has been enshrined in the interface of WaitableEvent
+ // now: |Dequeue| is called even when |sw.fired()| is true, even though it'll
+ // always return false in that case. However, taking the lock ensures that
+ // |Signal| has completed before we return and means that a WaitableEvent can
+ // synchronise its own destruction.
+ kernel_->lock_.Acquire();
+ kernel_->Dequeue(&sw, &sw);
+ kernel_->lock_.Release();
+
+ return return_value;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waiting on multiple objects.
+
+static bool // StrictWeakOrdering
+cmp_fst_addr(const std::pair<WaitableEvent*, size_t>& a,
+             const std::pair<WaitableEvent*, size_t>& b) {
+ return a.first < b.first;
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
+ size_t count) {
+ DCHECK(count) << "Cannot wait on no events";
+ internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
+ FROM_HERE, BlockingType::MAY_BLOCK);
+ // Record an event (the first) that this thread is blocking upon.
+ debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
+
+ // We need to acquire the locks in a globally consistent order. Thus we sort
+  // the array of waitables by address. We actually sort pairs so that we can
+ // map back to the original index values later.
+ std::vector<std::pair<WaitableEvent*, size_t> > waitables;
+ waitables.reserve(count);
+ for (size_t i = 0; i < count; ++i)
+ waitables.push_back(std::make_pair(raw_waitables[i], i));
+
+ DCHECK_EQ(count, waitables.size());
+
+ sort(waitables.begin(), waitables.end(), cmp_fst_addr);
+
+ // The set of waitables must be distinct. Since we have just sorted by
+ // address, we can check this cheaply by comparing pairs of consecutive
+ // elements.
+ for (size_t i = 0; i < waitables.size() - 1; ++i) {
+ DCHECK(waitables[i].first != waitables[i+1].first);
+ }
+
+ SyncWaiter sw;
+
+ const size_t r = EnqueueMany(&waitables[0], count, &sw);
+ if (r < count) {
+ // One of the events is already signaled. The SyncWaiter has not been
+ // enqueued anywhere.
+ return waitables[r].second;
+ }
+
+ // At this point, we hold the locks on all the WaitableEvents and we have
+ // enqueued our waiter in them all.
+ sw.lock()->Acquire();
+ // Release the WaitableEvent locks in the reverse order
+ for (size_t i = 0; i < count; ++i) {
+ waitables[count - (1 + i)].first->kernel_->lock_.Release();
+ }
+
+ for (;;) {
+ if (sw.fired())
+ break;
+
+ sw.cv()->Wait();
+ }
+ sw.lock()->Release();
+
+ // The address of the WaitableEvent which fired is stored in the SyncWaiter.
+ WaitableEvent *const signaled_event = sw.signaling_event();
+ // This will store the index of the raw_waitables which fired.
+ size_t signaled_index = 0;
+
+ // Take the locks of each WaitableEvent in turn (except the signaled one) and
+ // remove our SyncWaiter from the wait-list
+ for (size_t i = 0; i < count; ++i) {
+ if (raw_waitables[i] != signaled_event) {
+ raw_waitables[i]->kernel_->lock_.Acquire();
+ // There's no possible ABA issue with the address of the SyncWaiter here
+ // because it lives on the stack. Thus the tag value is just the pointer
+ // value again.
+ raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
+ raw_waitables[i]->kernel_->lock_.Release();
+ } else {
+ // By taking this lock here we ensure that |Signal| has completed by the
+ // time we return, because |Signal| holds this lock. This matches the
+ // behaviour of |Wait| and |TimedWait|.
+ raw_waitables[i]->kernel_->lock_.Acquire();
+ raw_waitables[i]->kernel_->lock_.Release();
+ signaled_index = i;
+ }
+ }
+
+ return signaled_index;
+}
+
+// -----------------------------------------------------------------------------
+// If return value == count:
+// The locks of the WaitableEvents have been taken in order and the Waiter has
+// been enqueued in the wait-list of each. None of the WaitableEvents are
+// currently signaled
+// else:
+// None of the WaitableEvent locks are held. The Waiter has not been enqueued
+// in any of them and the return value is the index of the WaitableEvent which
+// was signaled with the lowest input index from the original WaitMany call.
+// -----------------------------------------------------------------------------
+// static
+size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
+ size_t count,
+ Waiter* waiter) {
+ size_t winner = count;
+ size_t winner_index = count;
+ for (size_t i = 0; i < count; ++i) {
+ auto& kernel = waitables[i].first->kernel_;
+ kernel->lock_.Acquire();
+ if (kernel->signaled_ && waitables[i].second < winner) {
+ winner = waitables[i].second;
+ winner_index = i;
+ }
+ }
+
+ // No events signaled. All locks acquired. Enqueue the Waiter on all of them
+ // and return.
+ if (winner == count) {
+ for (size_t i = 0; i < count; ++i)
+ waitables[i].first->Enqueue(waiter);
+ return count;
+ }
+
+ // Unlock in reverse order and possibly clear the chosen winner's signal
+ // before returning its index.
+ for (auto* w = waitables + count - 1; w >= waitables; --w) {
+ auto& kernel = w->first->kernel_;
+ if (w->second == winner) {
+ if (!kernel->manual_reset_)
+ kernel->signaled_ = false;
+ }
+ kernel->lock_.Release();
+ }
+
+ return winner_index;
+}
+
+// -----------------------------------------------------------------------------
+
+
+// -----------------------------------------------------------------------------
+// Private functions...
+
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+ ResetPolicy reset_policy,
+ InitialState initial_state)
+ : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+ signaled_(initial_state == InitialState::SIGNALED) {}
+
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
+
+// -----------------------------------------------------------------------------
+// Wake all waiting waiters. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::SignalAll() {
+ bool signaled_at_least_one = false;
+
+ for (auto* i : kernel_->waiters_) {
+ if (i->Fire(this))
+ signaled_at_least_one = true;
+ }
+
+ kernel_->waiters_.clear();
+ return signaled_at_least_one;
+}
+
+// ---------------------------------------------------------------------------
+// Try to wake a single waiter. Return true if one was woken. Called with lock
+// held.
+// ---------------------------------------------------------------------------
+bool WaitableEvent::SignalOne() {
+ for (;;) {
+ if (kernel_->waiters_.empty())
+ return false;
+
+ const bool r = (*kernel_->waiters_.begin())->Fire(this);
+ kernel_->waiters_.pop_front();
+ if (r)
+ return true;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Add a waiter to the list of those waiting. Called with lock held.
+// -----------------------------------------------------------------------------
+void WaitableEvent::Enqueue(Waiter* waiter) {
+ kernel_->waiters_.push_back(waiter);
+}
+
+// -----------------------------------------------------------------------------
+// Remove a waiter from the list of those waiting. Return true if the waiter was
+// actually removed. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
+ for (auto i = waiters_.begin(); i != waiters_.end(); ++i) {
+ if (*i == waiter && (*i)->Compare(tag)) {
+ waiters_.erase(i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+
+} // namespace base